[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2.1\n\n# this allows you to use CircleCI's dynamic configuration feature\nsetup: true\n\n# the path-filtering orb is required to continue a pipeline based on\n# the path of an updated fileset\norbs:\n  path-filtering: circleci/path-filtering@0.1.2\n\nworkflows:\n  # the always-run workflow is always triggered, regardless of the pipeline parameters.\n  always-run:\n    jobs:\n      # the path-filtering/filter job determines which pipeline\n      # parameters to update.\n      - path-filtering/filter:\n          name: check-updated-files\n          # 3-column, whitespace-delimited mapping. One mapping per\n          # line:\n          # <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>\n          mapping: |\n            mmdet/.* lint_only false\n            requirements/.* lint_only false\n            tests/.* lint_only false\n            tools/.* lint_only false\n            configs/.* lint_only false\n            .circleci/.* lint_only false\n          base-revision: dev-3.x\n          # this is the path of the configuration we should trigger once\n          # path filtering and pipeline parameter value updates are\n          # complete. In this case, we are using the parent dynamic\n          # configuration itself.\n          config-path: .circleci/test.yml\n"
  },
  {
    "path": ".circleci/docker/Dockerfile",
    "content": "ARG PYTORCH=\"1.8.1\"\nARG CUDA=\"10.2\"\nARG CUDNN=\"7\"\n\nFROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel\n\n# To fix GPG key error when running apt-get update\nRUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub\nRUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub\n\nRUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx\n"
  },
  {
    "path": ".circleci/test.yml",
    "content": "version: 2.1\n\n# the default pipeline parameters, which will be updated according to\n# the results of the path-filtering orb\nparameters:\n  lint_only:\n    type: boolean\n    default: true\n\njobs:\n  lint:\n    docker:\n      - image: cimg/python:3.7.4\n    steps:\n      - checkout\n      - run:\n          name: Install pre-commit hook\n          command: |\n            pip install pre-commit\n            pre-commit install\n      - run:\n          name: Linting\n          command: pre-commit run --all-files\n      - run:\n          name: Check docstring coverage\n          command: |\n            pip install interrogate\n            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex \"__repr__\" --fail-under 85 mmdet\n\n  build_cpu:\n    parameters:\n      # The python version must match available image tags in\n      # https://circleci.com/developer/images/image/cimg/python\n      python:\n        type: string\n      torch:\n        type: string\n      torchvision:\n        type: string\n    docker:\n      - image: cimg/python:<< parameters.python >>\n    resource_class: large\n    steps:\n      - checkout\n      - run:\n          name: Install Libraries\n          command: |\n            sudo apt-get update\n            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5\n      - run:\n          name: Configure Python & pip\n          command: |\n            pip install --upgrade pip\n            pip install wheel\n      - run:\n          name: Install PyTorch\n          command: |\n            python -V\n            python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html\n      - when:\n          condition:\n            equal: [\"3.9.0\", << parameters.python >>]\n          steps:\n            - run: pip install \"protobuf <= 3.20.1\" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake\n      - run:\n          name: Install mmdet dependencies\n          # numpy may be downgraded after building pycocotools, which causes `ImportError: numpy.core.multiarray failed to import`\n          # force reinstall pycocotools to ensure pycocotools being built under the currenct numpy\n          command: |\n            python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main\n            pip install -U openmim\n            mim install 'mmcv >= 2.0.0rc4'\n            pip install -r requirements/tests.txt -r requirements/optional.txt\n            pip install --force-reinstall pycocotools\n            pip install albumentations>=0.3.2 --no-binary imgaug,albumentations\n            pip install git+https://github.com/cocodataset/panopticapi.git\n      - run:\n          name: Build and install\n          command: |\n            pip install -e .\n      - run:\n          name: Run unittests\n          command: |\n            python -m coverage run --branch --source mmdet -m pytest tests/\n            python -m coverage xml\n            python -m coverage report -m\n\n  build_cuda:\n    parameters:\n      torch:\n        type: string\n      cuda:\n        type: enum\n        enum: [\"10.1\", \"10.2\", \"11.1\"]\n      cudnn:\n        type: integer\n        default: 7\n    machine:\n      image: ubuntu-2004-cuda-11.4:202110-01\n      # docker_layer_caching: true\n    resource_class: 
gpu.nvidia.small\n    steps:\n      - checkout\n      - run:\n          # CLoning repos in VM since Docker doesn't have access to the private key\n          name: Clone Repos\n          command: |\n            git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine\n      - run:\n          name: Build Docker image\n          command: |\n            docker build .circleci/docker -t mmdetection:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>\n            docker run --gpus all -t -d -v /home/circleci/project:/mmdetection -v /home/circleci/mmengine:/mmengine -w /mmdetection --name mmdetection mmdetection:gpu\n            docker exec mmdetection apt-get install -y git\n      - run:\n          name: Install mmdet dependencies\n          command: |\n            docker exec mmdetection pip install -e /mmengine\n            docker exec mmdetection pip install -U openmim\n            docker exec mmdetection mim install 'mmcv >= 2.0.0rc4'\n            docker exec mmdetection pip install -r requirements/tests.txt -r requirements/optional.txt\n            docker exec mmdetection pip install pycocotools\n            docker exec mmdetection pip install albumentations>=0.3.2 --no-binary imgaug,albumentations\n            docker exec mmdetection pip install git+https://github.com/cocodataset/panopticapi.git\n            docker exec mmdetection python -c 'import mmcv; print(mmcv.__version__)'\n      - run:\n          name: Build and install\n          command: |\n            docker exec mmdetection pip install -e .\n      - run:\n          name: Run unittests\n          command: |\n            docker exec mmdetection python -m pytest tests/\n\nworkflows:\n  pr_stage_lint:\n    when: << pipeline.parameters.lint_only >>\n    jobs:\n      - lint:\n          name: lint\n          filters:\n            branches:\n              ignore:\n                - dev-3.x\n  pr_stage_test:\n    when:\n      not: << pipeline.parameters.lint_only >>\n    jobs:\n      - lint:\n          name: lint\n          filters:\n            branches:\n              ignore:\n                - dev-3.x\n      - build_cpu:\n          name: minimum_version_cpu\n          torch: 1.6.0\n          torchvision: 0.7.0\n          python: 3.7.4 # The lowest python 3.7.x version available on CircleCI images\n          requires:\n            - lint\n      - build_cpu:\n          name: maximum_version_cpu\n          torch: 1.13.0\n          torchvision: 0.14.0\n          python: 3.9.0\n          requires:\n            - minimum_version_cpu\n      - hold:\n          type: approval\n          requires:\n            - maximum_version_cpu\n      - build_cuda:\n          name: mainstream_version_gpu\n          torch: 1.8.1\n          # Use double quotation mark to explicitly specify its type\n          # as string instead of number\n          cuda: \"10.2\"\n          requires:\n            - hold\n  merge_stage_test:\n    when:\n      not: << pipeline.parameters.lint_only >>\n    jobs:\n      - build_cuda:\n          name: minimum_version_gpu\n          torch: 1.6.0\n          cuda: \"10.1\"\n          filters:\n            branches:\n              only:\n                - dev-3.x\n"
  },
  {
    "path": ".dev_scripts/batch_test_list.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# missing wider_face/timm_example/strong_baselines/simple_copy_paste/\n# selfsup_pretrain/seesaw_loss/pascal_voc/openimages/lvis/ld/lad/cityscapes/deepfashion\n\n# yapf: disable\natss = dict(\n    config='configs/atss/atss_r50_fpn_1x_coco.py',\n    checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=39.4),\n)\nautoassign = dict(\n    config='configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py',\n    checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.4),\n)\ncarafe = dict(\n    config='configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.6),\n)\ncascade_rcnn = [\n    dict(\n        config='configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',\n        checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',\n        eval='bbox',\n        url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', # noqa\n        metric=dict(bbox_mAP=40.3),\n    ),\n    dict(\n        config='configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',\n        checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',\n        url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', # noqa\n        eval=['bbox', 'segm'],\n        metric=dict(bbox_mAP=41.2, segm_mAP=35.9),\n    ),\n]\ncascade_rpn = dict(\n    config='configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py', # noqa\n    checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.4),\n)\ncenternet = dict(\n    config='configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py',\n    checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=29.5),\n)\ncentripetalnet = dict(\n    config='configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py',  # noqa\n    checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa\n    eval='bbox',\n    
metric=dict(bbox_mAP=44.7),\n)\nconvnext = dict(\n    config='configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py', # noqa\n    checkpoint='cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=51.8, segm_mAP=44.8),\n)\ncornernet = dict(\n    config='configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py',\n    checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=41.2),\n)\ndcn = dict(\n    config='configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=41.3),\n)\ndcnv2 = dict(\n    config='configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.7),\n)\nddod = dict(\n    config='configs/ddod/ddod_r50_fpn_1x_coco.py',\n    checkpoint='ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=41.7),\n)\ndeformable_detr = dict(\n    config='configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py',\n    checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=44.5),\n)\ndetectors = dict(\n    config='configs/detectors/detectors_htc-r50_1x_coco.py',\n    checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=49.1, segm_mAP=42.6),\n)\ndetr = dict(\n    config='configs/detr/detr_r50_8xb2-150e_coco.py',\n    checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.1),\n)\ndouble_heads = dict(\n    config='configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',\n    
url='https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.0),\n)\ndyhead = dict(\n    config='configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py',\n    checkpoint='atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=43.3),\n)\ndynamic_rcnn = dict(\n    config='configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.9),\n)\nefficientnet = dict(\n    config='configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py',\n    checkpoint='retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.5),\n)\nempirical_attention = dict(\n    config='configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py',  # noqa\n    checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.0),\n)\nfaster_rcnn = dict(\n    config='configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.4),\n)\nfcos = dict(\n    config='configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py',  # noqa\n    checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.7),\n)\nfoveabox = dict(\n    config='configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py',\n    checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.9),\n)\nfpg = dict(\n    config='configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py',\n    checkpoint='mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=43.0, segm_mAP=38.1),\n)\nfree_anchor = dict(\n    
config='configs/free_anchor/freeanchor_r50_fpn_1x_coco.py',\n    checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.7),\n)\nfsaf = dict(\n    config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',\n    checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.4),\n)\ngcnet = dict(\n    config='configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py',  # noqa\n    checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=40.4, segm_mAP=36.2),\n)\ngfl = dict(\n    config='configs/gfl/gfl_r50_fpn_1x_coco.py',\n    checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.2),\n)\nghm = dict(\n    config='configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py',\n    checkpoint='retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.0),\n)\ngn = dict(\n    config='configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py',\n    checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=40.1, segm_mAP=36.4),\n)\ngn_ws = dict(\n    config='configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=39.7),\n)\ngrid_rcnn = dict(\n    config='configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py',\n    checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.4),\n)\ngroie = dict(\n    config='configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py',\n    checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.3),\n)\nguided_anchoring = dict(\n        config='configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py',  # noqa\n        
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',\n        url='https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', # noqa\n        eval='bbox',\n        metric=dict(bbox_mAP=36.9),\n    )\nhrnet = dict(\n    config='configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py',\n    checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=36.9),\n)\nhtc = dict(\n    config='configs/htc/htc_r50_fpn_1x_coco.py',\n    checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=42.3, segm_mAP=37.4),\n)\ninstaboost = dict(\n    config='configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py',\n    checkpoint='mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=40.6, segm_mAP=36.6),\n)\nlibra_rcnn = dict(\n    config='configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.3),\n)\nmask2former = dict(\n    config='configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py',\n    checkpoint='mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa\n    eval=['bbox', 'segm', 'PQ'],\n    metric=dict(PQ=51.9, bbox_mAP=44.8, segm_mAP=41.9),\n)\nmask_rcnn = dict(\n    config='configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=38.2, segm_mAP=34.7),\n)\nmaskformer = dict(\n    config='configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py',\n    checkpoint='maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa\n    eval='PQ',\n    metric=dict(PQ=46.9),\n)\nms_rcnn = dict(\n    config='configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py',\n    checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=38.2, segm_mAP=36.0),\n)\nnas_fcos = dict(\n    
config='configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py',  # noqa\n    checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=39.4),\n)\nnas_fpn = dict(\n    config='configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py',\n    checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.5),\n)\npaa = dict(\n    config='configs/paa/paa_r50_fpn_1x_coco.py',\n    checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.4),\n)\npafpn = dict(\n    config='configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py',\n    checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.5),\n)\npanoptic_fpn = dict(\n    config='configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py',\n    checkpoint='panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth', # noqa\n    eval='PQ',\n    metric=dict(PQ=40.2),\n)\npisa = dict(\n    config='configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py',\n    checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=38.4),\n)\npoint_rend = dict(\n    config='configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py',\n    checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=38.4, segm_mAP=36.3),\n)\npvt = dict(\n    config='configs/pvt/retinanet_pvt-s_fpn_1x_coco.py',\n    checkpoint='retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=40.4),\n)\nqueryinst = dict(\n    config='configs/queryinst/queryinst_r50_fpn_1x_coco.py',\n    checkpoint='queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=42.0, segm_mAP=37.5),\n)\nregnet = dict(\n    config='configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py',\n    
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=40.4, segm_mAP=36.7),\n)\nreppoints = dict(\n    config='configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py',\n    checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.0),\n)\nres2net = dict(\n    config='configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py',\n    checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=43.0),\n)\nresnest = dict(\n    config='configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py',  # noqa\n    checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=42.0),\n)\nresnet_strikes_back = dict(\n    config='configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py', # noqa\n    checkpoint='mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=41.2, segm_mAP=38.2),\n)\nretinanet = dict(\n    config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',\n    checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=36.5),\n)\nrpn = dict(\n    config='configs/rpn/rpn_r50_fpn_1x_coco.py',\n    checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', # noqa\n    eval='proposal_fast',\n    metric=dict(AR_1000=58.2),\n)\nsabl = [\n    dict(\n        config='configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py',\n        checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',\n        url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', # noqa\n        eval='bbox',\n        metric=dict(bbox_mAP=37.7),\n    ),\n    dict(\n        config='configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py',\n        checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',\n        url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', # noqa\n        eval='bbox',\n        metric=dict(bbox_mAP=39.9),\n    ),\n]\nscnet = dict(\n    
config='configs/scnet/scnet_r50_fpn_1x_coco.py',\n    checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=43.5),\n)\nscratch = dict(\n    config='configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py',\n    checkpoint='scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=41.2, segm_mAP=37.4),\n)\nsolo = dict(\n    config='configs/solo/decoupled-solo_r50_fpn_1x_coco.py',\n    checkpoint='decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth', # noqa\n    eval='segm',\n    metric=dict(segm_mAP=33.9),\n)\nsolov2 = dict(\n    config='configs/solov2/solov2_r50_fpn_1x_coco.py',\n    checkpoint='solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth', # noqa\n    eval='segm',\n    metric=dict(segm_mAP=34.8),\n)\nsparse_rcnn = dict(\n    config='configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',\n    checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.9),\n)\nssd = [\n    dict(\n        config='configs/ssd/ssd300_coco.py',\n        checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',\n        url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth', # noqa\n        eval='bbox',\n        metric=dict(bbox_mAP=25.5),\n    ),\n    dict(\n        config='configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',\n        checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',  # noqa\n        url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa\n        eval='bbox',\n        metric=dict(bbox_mAP=21.3),\n    ),\n]\nswin = dict(\n    config='configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py',\n    checkpoint='mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=42.7, segm_mAP=39.3),\n)\ntood = dict(\n    config='configs/tood/tood_r50_fpn_1x_coco.py',\n    checkpoint='tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=42.4),\n)\ntridentnet = dict(\n    config='configs/tridentnet/tridentnet_r50-caffe_1x_coco.py',\n    
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.6),\n)\nvfnet = dict(\n    config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',\n    checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=41.6),\n)\nyolact = dict(\n    config='configs/yolact/yolact_r50_1xb8-55e_coco.py',\n    checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth', # noqa\n    eval=['bbox', 'segm'],\n    metric=dict(bbox_mAP=31.2, segm_mAP=29.0),\n)\nyolo = dict(\n    config='configs/yolo/yolov3_d53_8xb8-320-273e_coco.py',\n    checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=27.9),\n)\nyolof = dict(\n    config='configs/yolof/yolof_r50-c5_8xb8-1x_coco.py',\n    checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',\n    url='https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=37.5),\n)\nyolox = dict(\n    config='configs/yolox/yolox_tiny_8xb8-300e_coco.py',\n    checkpoint='yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth',  # noqa\n    url='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa\n    eval='bbox',\n    metric=dict(bbox_mAP=31.8),\n)\n# yapf: enable\n"
  },
  {
    "path": ".dev_scripts/batch_train_list.txt",
    "content": "configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py\nconfigs/atss/atss_r50_fpn_1x_coco.py\nconfigs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py\nconfigs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py\nconfigs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py\nconfigs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py\nconfigs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py\nconfigs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py\nconfigs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py\nconfigs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py\nconfigs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py\nconfigs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py\nconfigs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py\nconfigs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py\nconfigs/ddod/ddod_r50_fpn_1x_coco.py\nconfigs/detectors/detectors_htc-r50_1x_coco.py\nconfigs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py\nconfigs/detr/detr_r50_8xb2-150e_coco.py\nconfigs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py\nconfigs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py\nconfigs/dyhead/atss_r50_fpn_dyhead_1x_coco.py\nconfigs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py\nconfigs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py\nconfigs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py\nconfigs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py\nconfigs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py\nconfigs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py\nconfigs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py\nconfigs/free_anchor/freeanchor_r50_fpn_1x_coco.py\nconfigs/fsaf/fsaf_r50_fpn_1x_coco.py\nconfigs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py\nconfigs/gfl/gfl_r50_fpn_1x_coco.py\nconfigs/ghm/retinanet_r50_fpn_ghm-1x_coco.py\nconfigs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py\nconfigs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py\nconfigs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py\nconfigs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py\nconfigs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py\nconfigs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py\nconfigs/htc/htc_r50_fpn_1x_coco.py\nconfigs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py\nconfigs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py\nconfigs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py\nconfigs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py\nconfigs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py\nconfigs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py\nconfigs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py\nconfigs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py\nconfigs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py\nconfigs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py\nconfigs/paa/paa_r50_fpn_1x_coco.py\nconfigs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py\nconfigs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py\nconfigs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py\nconfigs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py\nconfigs/pvt/retinanet_pvt-t_fpn_1x_coco.py\nconfigs/queryinst/queryinst_r50_fpn_1x_coco.py\nconfigs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py\nconfigs/reppoints/reppoints-moment_r50_fpn_1x_coco.py\nconfigs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py\nconfigs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py\nconfigs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py\nconfigs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py\nconfigs/rpn/rpn_r50_fpn_1x_coco.
py\nconfigs/sabl/sabl-retinanet_r50_fpn_1x_coco.py\nconfigs/scnet/scnet_r50_fpn_1x_coco.py\nconfigs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py\nconfigs/solo/solo_r50_fpn_1x_coco.py\nconfigs/solov2/solov2_r50_fpn_1x_coco.py\nconfigs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py\nconfigs/ssd/ssd300_coco.py\nconfigs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py\nconfigs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py\nconfigs/tood/tood_r50_fpn_1x_coco.py\nconfigs/tridentnet/tridentnet_r50-caffe_1x_coco.py\nconfigs/vfnet/vfnet_r50_fpn_1x_coco.py\nconfigs/yolact/yolact_r50_8xb8-55e_coco.py\nconfigs/yolo/yolov3_d53_8xb8-320-273e_coco.py\nconfigs/yolof/yolof_r50-c5_8xb8-1x_coco.py\nconfigs/yolox/yolox_tiny_8xb8-300e_coco.py\n"
  },
  {
    "path": ".dev_scripts/benchmark_filter.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Filter configs to train')\n    parser.add_argument(\n        '--basic-arch',\n        action='store_true',\n        help='to train models in basic arch')\n    parser.add_argument(\n        '--datasets', action='store_true', help='to train models in dataset')\n    parser.add_argument(\n        '--data-pipeline',\n        action='store_true',\n        help='to train models related to data pipeline, e.g. augmentations')\n    parser.add_argument(\n        '--nn-module',\n        action='store_true',\n        help='to train models related to neural network modules')\n    parser.add_argument(\n        '--model-options',\n        nargs='+',\n        help='custom options to special model benchmark')\n    parser.add_argument(\n        '--out',\n        type=str,\n        default='batch_train_list.txt',\n        help='output path of gathered metrics to be stored')\n    args = parser.parse_args()\n    return args\n\n\nbasic_arch_root = [\n    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',\n    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',\n    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',\n    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',\n    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',\n    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',\n    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'\n]\n\ndatasets_root = [\n    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'\n]\n\ndata_pipeline_root = ['albu_example', 'instaboost']\n\nnn_module_root = [\n    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',\n    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'\n]\n\nbenchmark_pool = [\n    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',\n    'configs/atss/atss_r50_fpn_1x_coco.py',\n    'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',\n    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',\n    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',\n    'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',\n    'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',\n    'configs/centripetalnet/'\n    'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',\n    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',\n    'configs/cornernet/'\n    'cornernet_hourglass104_mstest_8x6_210e_coco.py',\n    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',\n    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',\n    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',\n    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',\n    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',\n    'configs/detectors/detectors_htc_r50_1x_coco.py',\n    'configs/detr/detr_r50_8x2_150e_coco.py',\n    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',\n    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',\n    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa\n    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',\n    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',\n    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',\n    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',\n  
  'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',\n    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',\n    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',\n    'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',\n    'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',\n    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',\n    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',\n    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',\n    'configs/gfl/gfl_r50_fpn_1x_coco.py',\n    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',\n    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',\n    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',\n    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',\n    'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',\n    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',\n    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',\n    'configs/htc/htc_r50_fpn_1x_coco.py',\n    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',\n    'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',\n    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',\n    'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',\n    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',\n    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',\n    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',\n    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',\n    'configs/paa/paa_r50_fpn_1x_coco.py',\n    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',\n    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',\n    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',\n    'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',\n    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',\n    'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',\n    'configs/resnest/'\n    'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',\n    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',\n    'configs/rpn/rpn_r50_fpn_1x_coco.py',\n    'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',\n    'configs/ssd/ssd300_coco.py',\n    'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',\n    'configs/vfnet/vfnet_r50_fpn_1x_coco.py',\n    'configs/yolact/yolact_r50_1x8_coco.py',\n    'configs/yolo/yolov3_d53_320_273e_coco.py',\n    'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',\n    'configs/scnet/scnet_r50_fpn_1x_coco.py',\n    'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',\n]\n\n\ndef main():\n    args = parse_args()\n\n    benchmark_type = []\n    if args.basic_arch:\n        benchmark_type += basic_arch_root\n    if args.datasets:\n        benchmark_type += datasets_root\n    if args.data_pipeline:\n        benchmark_type += data_pipeline_root\n    if args.nn_module:\n        benchmark_type += nn_module_root\n\n    special_model = args.model_options\n    if special_model is not None:\n        benchmark_type += special_model\n\n    config_dpath = 'configs/'\n    benchmark_configs = []\n    for cfg_root in benchmark_type:\n        cfg_dir = osp.join(config_dpath, cfg_root)\n        configs = os.scandir(cfg_dir)\n        for cfg in configs:\n            config_path = osp.join(cfg_dir, cfg.name)\n            if (config_path in benchmark_pool\n                    and config_path not in benchmark_configs):\n                benchmark_configs.append(config_path)\n\n    print(f'Totally found {len(benchmark_configs)} 
configs to benchmark')\n    with open(args.out, 'w') as f:\n        for config in benchmark_configs:\n            f.write(config + '\\n')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": ".dev_scripts/benchmark_full_models.txt",
    "content": "albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py\natss/atss_r50_fpn_1x_coco.py\nautoassign/autoassign_r50-caffe_fpn_1x_coco.py\nboxinst/boxinst_r50_fpn_ms-90k_coco.py\ncarafe/faster-rcnn_r50_fpn-carafe_1x_coco.py\ncascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py\ncascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py\ncascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py\ncenternet/centernet-update_r50-caffe_fpn_ms-1x_coco.py\ncentripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py\ncondinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py\nconditional_detr/conditional-detr_r50_8xb2-50e_coco.py\nconvnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py\ncornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py\ndab_detr/dab-detr_r50_8xb2-50e_coco.py\ndcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py\ndcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py\nddod/ddod_r50_fpn_1x_coco.py\ndeformable_detr/deformable-detr_r50_16xb2-50e_coco.py\ndetectors/detectors_htc-r50_1x_coco.py\ndetr/detr_r50_8xb2-150e_coco.py\ndino/dino-4scale_r50_8xb2-12e_coco.py\ndouble_heads/dh-faster-rcnn_r50_fpn_1x_coco.py\ndyhead/atss_r50_fpn_dyhead_1x_coco.py\ndynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py\nefficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py\nempirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py\nfaster_rcnn/faster-rcnn_r50_fpn_1x_coco.py\nfcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py\nfoveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py\nfpg/retinanet_r50_fpg_crop640_50e_coco.py\nfree_anchor/freeanchor_r50_fpn_1x_coco.py\nfsaf/fsaf_r50_fpn_1x_coco.py\ngcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py\ngfl/gfl_r50_fpn_1x_coco.py\nghm/retinanet_r50_fpn_ghm-1x_coco.py\ngn/mask-rcnn_r50_fpn_gn-all_2x_coco.py\ngn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py\ngrid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py\ngroie/faste-rcnn_r50_fpn_groie_1x_coco.py\nguided_anchoring/ga-faster-rcnn_r50_fpn_1x_coco.py\nhrnet/htc_hrnetv2p-w18_20e_coco.py\nhtc/htc_r50_fpn_1x_coco.py\ninstaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py\nlad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py\nld/ld_r18-gflv1-r101_fpn_1x_coco.py\nlibra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py\nlvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py\nmask2former/mask2former_r50_8xb2-lsj-50e_coco.py\nmask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py\nmask_rcnn/mask-rcnn_r50_fpn_1x_coco.py\nmaskformer/maskformer_r50_ms-16xb1-75e_coco.py\nms_rcnn/ms-rcnn_r50_fpn_1x_coco.py\nnas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py\nnas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py\npaa/paa_r50_fpn_1x_coco.py\npafpn/faster-rcnn_r50_pafpn_1x_coco.py\npanoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py\npisa/faster-rcnn_r50_fpn_pisa_1x_coco.py\npoint_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py\npvt/retinanet_pvtv2-b0_fpn_1x_coco.py\nqueryinst/queryinst_r50_fpn_1x_coco.py\nregnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py\nreppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py\nres2net/faster-rcnn_res2net-101_fpn_2x_coco.py\nresnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py\nresnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py\nretinanet/retinanet_r50_fpn_1x_coco.py\nrpn/rpn_r50_fpn_1x_coco.py\nrtmdet/rtmdet_s_8xb32-300e_coco.py\nrtmdet/rtmdet-ins_s_8xb32-300e_coco.py\nsabl/sabl-retinanet_r50_fpn_1x_coco.py\nscnet/scnet_r50_fpn_1x_coco.py\nscratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py\nseesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py\nsimple_copy_paste/mask-rcnn_r50_fpn
_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py\nsoft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py\nsolo/solo_r50_fpn_1x_coco.py\nsolov2/solov2_r50_fpn_1x_coco.py\nsparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py\nssd/ssd300_coco.py\nstrong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py\nswin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py\ntimm_example/retinanet_timm-tv-resnet50_fpn_1x_coco.py\ntood/tood_r50_fpn_1x_coco.py\ntridentnet/tridentnet_r50-caffe_1x_coco.py\nvfnet/vfnet_r50_fpn_1x_coco.py\nyolact/yolact_r50_8xb8-55e_coco.py\nyolo/yolov3_d53_8xb8-320-273e_coco.py\nyolof/yolof_r50-c5_8xb8-1x_coco.py\nyolox/yolox_s_8xb8-300e_coco.py\n"
  },
  {
    "path": ".dev_scripts/benchmark_inference_fps.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.dist import init_dist\nfrom mmengine.fileio import dump\nfrom mmengine.utils import mkdir_or_exist\nfrom terminaltables import GithubFlavoredMarkdownTable\n\nfrom tools.analysis_tools.benchmark import repeat_measure_inference_speed\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='MMDet benchmark a model of FPS')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint_root', help='Checkpoint file root path')\n    parser.add_argument(\n        '--round-num',\n        type=int,\n        default=1,\n        help='round a number to a given precision in decimal digits')\n    parser.add_argument(\n        '--repeat-num',\n        type=int,\n        default=1,\n        help='number of repeat times of measurement for averaging the results')\n    parser.add_argument(\n        '--out', type=str, help='output path of gathered fps to be stored')\n    parser.add_argument(\n        '--max-iter', type=int, default=2000, help='num of max iter')\n    parser.add_argument(\n        '--log-interval', type=int, default=50, help='interval of logging')\n    parser.add_argument(\n        '--fuse-conv-bn',\n        action='store_true',\n        help='Whether to fuse conv and bn, this will slightly increase'\n        'the inference speed')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    return args\n\n\ndef results2markdown(result_dict):\n    table_data = []\n    is_multiple_results = False\n    for cfg_name, value in result_dict.items():\n        name = cfg_name.replace('configs/', '')\n        fps = value['fps']\n        ms_times_pre_image = value['ms_times_pre_image']\n        if isinstance(fps, list):\n            is_multiple_results = True\n            mean_fps = value['mean_fps']\n            mean_times_pre_image = value['mean_times_pre_image']\n            fps_str = ','.join([str(s) for s in fps])\n            ms_times_pre_image_str = ','.join(\n                [str(s) for s in ms_times_pre_image])\n            table_data.append([\n                name, fps_str, mean_fps, ms_times_pre_image_str,\n                mean_times_pre_image\n            ])\n        else:\n            table_data.append([name, fps, ms_times_pre_image])\n\n    if is_multiple_results:\n        table_data.insert(0, [\n            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',\n            'mean_times_pre_image(ms)'\n        ])\n\n    else:\n        table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)'])\n    table = GithubFlavoredMarkdownTable(table_data)\n    print(table.table, flush=True)\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    assert args.round_num >= 0\n    assert args.repeat_num >= 1\n\n    config = Config.fromfile(args.config)\n\n    if args.launcher == 'none':\n        raise NotImplementedError('Only supports distributed mode')\n    else:\n        init_dist(args.launcher)\n\n    result_dict = {}\n    for model_key in config:\n        model_infos = config[model_key]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            record_metrics = model_info['metric']\n            cfg_path = model_info['config'].strip()\n            cfg = Config.fromfile(cfg_path)\n            checkpoint = osp.join(args.checkpoint_root,\n                                  model_info['checkpoint'].strip())\n            try:\n                fps = repeat_measure_inference_speed(cfg, checkpoint,\n                                                     args.max_iter,\n                                                     args.log_interval,\n                                                     args.fuse_conv_bn,\n                                                     args.repeat_num)\n                if args.repeat_num > 1:\n                    fps_list = [round(fps_, args.round_num) for fps_ in fps]\n                    times_pre_image_list = [\n                        round(1000 / fps_, args.round_num) for fps_ in fps\n                    ]\n                    mean_fps = round(\n                        sum(fps_list) / len(fps_list), args.round_num)\n                    mean_times_pre_image = round(\n                        sum(times_pre_image_list) / len(times_pre_image_list),\n                        args.round_num)\n                    print(\n                        f'{cfg_path} '\n                        f'Overall fps: 
{fps_list}[{mean_fps}] img / s, '\n                        f'times per image: '\n                        f'{times_pre_image_list}[{mean_times_pre_image}] '\n                        f'ms / img',\n                        flush=True)\n                    result_dict[cfg_path] = dict(\n                        fps=fps_list,\n                        mean_fps=mean_fps,\n                        ms_times_pre_image=times_pre_image_list,\n                        mean_times_pre_image=mean_times_pre_image)\n                else:\n                    print(\n                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '\n                        f'times per image: {1000 / fps:.{args.round_num}f} '\n                        f'ms / img',\n                        flush=True)\n                    result_dict[cfg_path] = dict(\n                        fps=round(fps, args.round_num),\n                        ms_times_pre_image=round(1000 / fps, args.round_num))\n            except Exception as e:\n                print(f'{cfg_path} error: {repr(e)}')\n                if args.repeat_num > 1:\n                    result_dict[cfg_path] = dict(\n                        fps=[0],\n                        mean_fps=0,\n                        ms_times_pre_image=[0],\n                        mean_times_pre_image=0)\n                else:\n                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)\n\n    if args.out:\n        mkdir_or_exist(args.out)\n        dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))\n\n    results2markdown(result_dict)\n"
  },
  {
    "path": ".dev_scripts/benchmark_options.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nthird_part_libs = [\n    'pip install -r ../requirements/albu.txt',\n    'pip install instaboostfast',\n    'pip install git+https://github.com/cocodataset/panopticapi.git',\n    'pip install timm',\n    'pip install mmcls>=1.0.0rc0',\n    'pip install git+https://github.com/lvis-dataset/lvis-api.git',\n]\n\ndefault_floating_range = 0.5\nmodel_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}\n"
  },
  {
    "path": ".dev_scripts/benchmark_test.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport os\nimport os.path as osp\nfrom argparse import ArgumentParser\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.logging import MMLogger\nfrom mmengine.registry import RUNNERS\nfrom mmengine.runner import Runner\n\nfrom mmdet.testing import replace_to_ceph\nfrom mmdet.utils import register_all_modules, replace_cfg_vals\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint_root', help='Checkpoint file root path')\n    parser.add_argument('--work-dir', help='the dir to save logs')\n    parser.add_argument('--ceph', action='store_true')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    args = parser.parse_args()\n    return args\n\n\n# TODO: Need to refactor test.py so that it can be reused.\ndef fast_test_model(config_name, checkpoint, args, logger=None):\n    cfg = Config.fromfile(config_name)\n    cfg = replace_cfg_vals(cfg)\n    cfg.launcher = args.launcher\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # work_dir is determined in this priority: CLI > segment in file > filename\n    if args.work_dir is not None:\n        # update configs according to CLI args if args.work_dir is not None\n        cfg.work_dir = osp.join(args.work_dir,\n                                osp.splitext(osp.basename(config_name))[0])\n    elif cfg.get('work_dir', None) is None:\n        # use config filename as default work_dir if cfg.work_dir is None\n        cfg.work_dir = osp.join('./work_dirs',\n                                osp.splitext(osp.basename(config_name))[0])\n\n    if args.ceph:\n        replace_to_ceph(cfg)\n\n    cfg.load_from = checkpoint\n\n    # TODO: temporary plan\n    if 'visualizer' in cfg:\n        if 'name' in cfg.visualizer:\n            del cfg.visualizer.name\n\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    runner.test()\n\n\n# Sample test whether the inference code is correct\ndef main(args):\n    # register all modules in mmdet into the registries\n    register_all_modules(init_default_scope=False)\n\n    config = Config.fromfile(args.config)\n\n    # test all model\n    logger = MMLogger.get_instance(\n        name='MMLogger',\n        log_file='benchmark_test.log',\n        log_level=logging.ERROR)\n\n    for model_key in config:\n        model_infos = config[model_key]\n     
   if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            print('processing: ', model_info['config'], flush=True)\n            config_name = model_info['config'].strip()\n            checkpoint = osp.join(args.checkpoint_root,\n                                  model_info['checkpoint'].strip())\n            try:\n                fast_test_model(config_name, checkpoint, args, logger)\n            except Exception as e:\n                logger.error(f'{config_name}: {repr(e)}')\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n"
  },
  {
    "path": ".dev_scripts/benchmark_test_image.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport os.path as osp\nfrom argparse import ArgumentParser\n\nimport mmcv\nfrom mmengine.config import Config\nfrom mmengine.logging import MMLogger\nfrom mmengine.utils import mkdir_or_exist\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.registry import VISUALIZERS\nfrom mmdet.utils import register_all_modules\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint_root', help='Checkpoint file root path')\n    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')\n    parser.add_argument('--aug', action='store_true', help='aug test')\n    parser.add_argument('--model-name', help='model name to inference')\n    parser.add_argument('--show', action='store_true', help='show results')\n    parser.add_argument('--out-dir', default=None, help='Dir to output file')\n    parser.add_argument(\n        '--wait-time',\n        type=float,\n        default=1,\n        help='the interval of show (s), 0 is block')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--palette',\n        default='coco',\n        choices=['coco', 'voc', 'citys', 'random'],\n        help='Color palette used for visualization')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.3, help='bbox score threshold')\n    args = parser.parse_args()\n    return args\n\n\ndef inference_model(config_name, checkpoint, visualizer, args, logger=None):\n    cfg = Config.fromfile(config_name)\n    if args.aug:\n        raise NotImplementedError()\n\n    model = init_detector(\n        cfg, checkpoint, palette=args.palette, device=args.device)\n    visualizer.dataset_meta = model.dataset_meta\n\n    # test a single image\n    result = inference_detector(model, args.img)\n\n    # show the results\n    if args.show or args.out_dir is not None:\n        img = mmcv.imread(args.img)\n        img = mmcv.imconvert(img, 'bgr', 'rgb')\n        out_file = None\n        if args.out_dir is not None:\n            out_dir = args.out_dir\n            mkdir_or_exist(out_dir)\n\n            out_file = osp.join(\n                out_dir,\n                config_name.split('/')[-1].replace('py', 'jpg'))\n\n        visualizer.add_datasample(\n            'result',\n            img,\n            data_sample=result,\n            draw_gt=False,\n            show=args.show,\n            wait_time=args.wait_time,\n            out_file=out_file,\n            pred_score_thr=args.score_thr)\n\n    return result\n\n\n# Sample test whether the inference code is correct\ndef main(args):\n    # register all modules in mmdet into the registries\n    register_all_modules()\n\n    config = Config.fromfile(args.config)\n\n    # init visualizer\n    visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')\n    visualizer = VISUALIZERS.build(visualizer_cfg)\n\n    # test single model\n    if args.model_name:\n        if args.model_name in config:\n            model_infos = config[args.model_name]\n            if not isinstance(model_infos, list):\n                model_infos = [model_infos]\n            model_info = model_infos[0]\n            config_name = model_info['config'].strip()\n            print(f'processing: {config_name}', flush=True)\n            checkpoint = osp.join(args.checkpoint_root,\n                                  
model_info['checkpoint'].strip())\n            # build the model from a config file and a checkpoint file\n            inference_model(config_name, checkpoint, visualizer, args)\n            return\n        else:\n            raise RuntimeError('model name input error.')\n\n    # test all model\n    logger = MMLogger.get_instance(\n        name='MMLogger',\n        log_file='benchmark_test_image.log',\n        log_level=logging.ERROR)\n\n    for model_key in config:\n        model_infos = config[model_key]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            print('processing: ', model_info['config'], flush=True)\n            config_name = model_info['config'].strip()\n            checkpoint = osp.join(args.checkpoint_root,\n                                  model_info['checkpoint'].strip())\n            try:\n                # build the model from a config file and a checkpoint file\n                inference_model(config_name, checkpoint, visualizer, args,\n                                logger)\n            except Exception as e:\n                logger.error(f'{config_name}: {repr(e)}')\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n"
  },
  {
    "path": ".dev_scripts/benchmark_train.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport os\nimport os.path as osp\nfrom argparse import ArgumentParser\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.logging import MMLogger, print_log\nfrom mmengine.registry import RUNNERS\nfrom mmengine.runner import Runner\n\nfrom mmdet.testing import replace_to_ceph\nfrom mmdet.utils import register_all_modules, replace_cfg_vals\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('--work-dir', help='the dir to save logs and models')\n    parser.add_argument('--ceph', action='store_true')\n    parser.add_argument('--save-ckpt', action='store_true')\n    parser.add_argument(\n        '--amp',\n        action='store_true',\n        default=False,\n        help='enable automatic-mixed-precision training')\n    parser.add_argument(\n        '--auto-scale-lr',\n        action='store_true',\n        help='enable automatically scaling LR.')\n    parser.add_argument(\n        '--resume',\n        action='store_true',\n        help='resume from the latest checkpoint in the work_dir automatically')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    args = parser.parse_args()\n    return args\n\n\n# TODO: Need to refactor train.py so that it can be reused.\ndef fast_train_model(config_name, args, logger=None):\n    cfg = Config.fromfile(config_name)\n    cfg = replace_cfg_vals(cfg)\n    cfg.launcher = args.launcher\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # work_dir is determined in this priority: CLI > segment in file > filename\n    if args.work_dir is not None:\n        # update configs according to CLI args if args.work_dir is not None\n        cfg.work_dir = osp.join(args.work_dir,\n                                osp.splitext(osp.basename(config_name))[0])\n    elif cfg.get('work_dir', None) is None:\n        # use config filename as default work_dir if cfg.work_dir is None\n        cfg.work_dir = osp.join('./work_dirs',\n                                osp.splitext(osp.basename(config_name))[0])\n\n    ckpt_hook = cfg.default_hooks.checkpoint\n    by_epoch = ckpt_hook.get('by_epoch', True)\n    fast_stop_hook = dict(type='FastStopTrainingHook')\n    fast_stop_hook['by_epoch'] = by_epoch\n    if args.save_ckpt:\n        if by_epoch:\n            interval = 1\n            stop_iter_or_epoch = 2\n        else:\n            interval = 4\n            stop_iter_or_epoch = 10\n        fast_stop_hook['stop_iter_or_epoch'] = stop_iter_or_epoch\n        fast_stop_hook['save_ckpt'] = True\n        ckpt_hook.interval = interval\n\n    if 'custom_hooks' 
in cfg:\n        cfg.custom_hooks.append(fast_stop_hook)\n    else:\n        custom_hooks = [fast_stop_hook]\n        cfg.custom_hooks = custom_hooks\n\n    # TODO: temporary plan\n    if 'visualizer' in cfg:\n        if 'name' in cfg.visualizer:\n            del cfg.visualizer.name\n\n    # enable automatic-mixed-precision training\n    if args.amp is True:\n        optim_wrapper = cfg.optim_wrapper.type\n        if optim_wrapper == 'AmpOptimWrapper':\n            print_log(\n                'AMP training is already enabled in your config.',\n                logger='current',\n                level=logging.WARNING)\n        else:\n            assert optim_wrapper == 'OptimWrapper', (\n                '`--amp` is only supported when the optimizer wrapper type is '\n                f'`OptimWrapper` but got {optim_wrapper}.')\n            cfg.optim_wrapper.type = 'AmpOptimWrapper'\n            cfg.optim_wrapper.loss_scale = 'dynamic'\n\n    # enable automatically scaling LR\n    if args.auto_scale_lr:\n        if 'auto_scale_lr' in cfg and \\\n                'enable' in cfg.auto_scale_lr and \\\n                'base_batch_size' in cfg.auto_scale_lr:\n            cfg.auto_scale_lr.enable = True\n        else:\n            raise RuntimeError('Can not find \"auto_scale_lr\" or '\n                               '\"auto_scale_lr.enable\" or '\n                               '\"auto_scale_lr.base_batch_size\" in your'\n                               ' configuration file.')\n\n    if args.ceph:\n        replace_to_ceph(cfg)\n\n    cfg.resume = args.resume\n\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    runner.train()\n\n\n# Sample test whether the train code is correct\ndef main(args):\n    # register all modules in mmdet into the registries\n    register_all_modules(init_default_scope=False)\n\n    config = Config.fromfile(args.config)\n\n    # test all model\n    logger = MMLogger.get_instance(\n        name='MMLogger',\n        log_file='benchmark_train.log',\n        log_level=logging.ERROR)\n\n    for model_key in config:\n        model_infos = config[model_key]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            print('processing: ', model_info['config'], flush=True)\n            config_name = model_info['config'].strip()\n            try:\n                fast_train_model(config_name, args, logger)\n            except RuntimeError as e:\n                # quick exit is the normal exit message\n                if 'quick exit' not in repr(e):\n                    logger.error(f'{config_name} \" : {repr(e)}')\n            except Exception as e:\n                logger.error(f'{config_name} \" : {repr(e)}')\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n"
  },
  {
    "path": ".dev_scripts/benchmark_train_models.txt",
    "content": "atss/atss_r50_fpn_1x_coco.py\nfaster_rcnn/faster-rcnn_r50_fpn_1x_coco.py\nmask_rcnn/mask-rcnn_r50_fpn_1x_coco.py\ncascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py\npanoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py\nretinanet/retinanet_r50_fpn_1x_coco.py\nrtmdet/rtmdet_s_8xb32-300e_coco.py\nrtmdet/rtmdet-ins_s_8xb32-300e_coco.py\ndeformable_detr/deformable-detr_r50_16xb2-50e_coco.py\nfcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py\ncenternet/centernet-update_r50-caffe_fpn_ms-1x_coco.py\ndino/dino-4scale_r50_8xb2-12e_coco.py\nhtc/htc_r50_fpn_1x_coco.py\nmask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py\nswin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py\ncondinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py\nlvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py\nconvnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py\n"
  },
  {
    "path": ".dev_scripts/benchmark_valid_flops.py",
    "content": "import logging\nimport re\nimport tempfile\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom functools import partial\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom mmengine import Config, DictAction\nfrom mmengine.analysis import get_model_complexity_info\nfrom mmengine.analysis.print_helper import _format_size\nfrom mmengine.fileio import FileClient\nfrom mmengine.logging import MMLogger\nfrom mmengine.model import revert_sync_batchnorm\nfrom mmengine.runner import Runner\nfrom modelindex.load_model_index import load\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich.text import Text\nfrom tqdm import tqdm\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import register_all_modules\n\nconsole = Console()\nMMDET_ROOT = Path(__file__).absolute().parents[1]\n\n\ndef parse_args():\n    parser = ArgumentParser(description='Valid all models in model-index.yml')\n    parser.add_argument(\n        '--shape',\n        type=int,\n        nargs='+',\n        default=[1280, 800],\n        help='input image size')\n    parser.add_argument(\n        '--checkpoint_root',\n        help='Checkpoint file root path. If set, load checkpoint before test.')\n    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')\n    parser.add_argument('--models', nargs='+', help='models name to inference')\n    parser.add_argument(\n        '--batch-size',\n        type=int,\n        default=1,\n        help='The batch size during the inference.')\n    parser.add_argument(\n        '--flops', action='store_true', help='Get Flops and Params of models')\n    parser.add_argument(\n        '--flops-str',\n        action='store_true',\n        help='Output FLOPs and params counts in a string form.')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--size_divisor',\n        type=int,\n        default=32,\n        help='Pad the input image, the minimum size that is divisible '\n        'by size_divisor, -1 means do not pad the image.')\n    args = parser.parse_args()\n    return args\n\n\ndef inference(config_file, checkpoint, work_dir, args, exp_name):\n    logger = MMLogger.get_instance(name='MMLogger')\n    logger.warning('if you want test flops, please make sure torch>=1.12')\n    cfg = Config.fromfile(config_file)\n    cfg.work_dir = work_dir\n    cfg.load_from = checkpoint\n    cfg.log_level = 'WARN'\n    cfg.experiment_name = exp_name\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # forward the model\n    result = {'model': config_file.stem}\n\n    if args.flops:\n\n        if len(args.shape) == 1:\n            h = w = args.shape[0]\n        elif len(args.shape) == 2:\n            h, w = args.shape\n        else:\n            raise ValueError('invalid input shape')\n        divisor = args.size_divisor\n        if divisor > 0:\n            h = int(np.ceil(h / divisor)) * divisor\n            w = int(np.ceil(w / divisor)) * divisor\n\n        input_shape = (3, h, w)\n        result['resolution'] = input_shape\n\n        try:\n            cfg = Config.fromfile(config_file)\n            if hasattr(cfg, 'head_norm_cfg'):\n                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)\n                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(\n                    type='SyncBN', requires_grad=True)\n                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(\n                    type='SyncBN', requires_grad=True)\n\n            if args.cfg_options is not None:\n                cfg.merge_from_dict(args.cfg_options)\n\n            model = MODELS.build(cfg.model)\n            input = torch.rand(1, *input_shape)\n            if torch.cuda.is_available():\n                model.cuda()\n                input = input.cuda()\n            model = revert_sync_batchnorm(model)\n            inputs = (input, )\n            model.eval()\n            outputs = get_model_complexity_info(\n                model, input_shape, inputs, show_table=False, show_arch=False)\n            flops = outputs['flops']\n            params = outputs['params']\n            activations = outputs['activations']\n            result['Get Types'] = 'direct'\n        except:  # noqa 772\n            logger = MMLogger.get_instance(name='MMLogger')\n            logger.warning(\n                'Direct get flops failed, try to get flops with data')\n            cfg = Config.fromfile(config_file)\n            if hasattr(cfg, 'head_norm_cfg'):\n                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)\n                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(\n                    type='SyncBN', requires_grad=True)\n                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(\n                    type='SyncBN', requires_grad=True)\n            data_loader = Runner.build_dataloader(cfg.val_dataloader)\n            data_batch = next(iter(data_loader))\n            model = MODELS.build(cfg.model)\n            if torch.cuda.is_available():\n                model = model.cuda()\n            model = revert_sync_batchnorm(model)\n            model.eval()\n            _forward = 
model.forward\n            data = model.data_preprocessor(data_batch)\n            del data_loader\n            model.forward = partial(\n                _forward, data_samples=data['data_samples'])\n            outputs = get_model_complexity_info(\n                model,\n                input_shape,\n                data['inputs'],\n                show_table=False,\n                show_arch=False)\n            flops = outputs['flops']\n            params = outputs['params']\n            activations = outputs['activations']\n            result['Get Types'] = 'dataloader'\n\n        if args.flops_str:\n            flops = _format_size(flops)\n            params = _format_size(params)\n            activations = _format_size(activations)\n\n        result['flops'] = flops\n        result['params'] = params\n\n    return result\n\n\ndef show_summary(summary_data, args):\n    table = Table(title='Validation Benchmark Regression Summary')\n    table.add_column('Model')\n    table.add_column('Validation')\n    table.add_column('Resolution (c, h, w)')\n    if args.flops:\n        table.add_column('Flops', justify='right', width=11)\n        table.add_column('Params', justify='right')\n\n    for model_name, summary in summary_data.items():\n        row = [model_name]\n        valid = summary['valid']\n        color = 'green' if valid == 'PASS' else 'red'\n        row.append(f'[{color}]{valid}[/{color}]')\n        if valid == 'PASS':\n            row.append(str(summary['resolution']))\n            if args.flops:\n                row.append(str(summary['flops']))\n                row.append(str(summary['params']))\n        table.add_row(*row)\n\n    console.print(table)\n    table_data = {\n        x.header: [Text.from_markup(y).plain for y in x.cells]\n        for x in table.columns\n    }\n    table_pd = pd.DataFrame(table_data)\n    table_pd.to_csv('./mmdetection_flops.csv')\n\n\n# Sample test whether the inference code is correct\ndef main(args):\n    register_all_modules()\n    model_index_file = MMDET_ROOT / 'model-index.yml'\n    model_index = load(str(model_index_file))\n    model_index.build_models_with_collections()\n    models = OrderedDict({model.name: model for model in model_index.models})\n\n    logger = MMLogger(\n        'validation',\n        logger_name='validation',\n        log_file='benchmark_test_image.log',\n        log_level=logging.INFO)\n\n    if args.models:\n        patterns = [\n            re.compile(pattern.replace('+', '_')) for pattern in args.models\n        ]\n        filter_models = {}\n        for k, v in models.items():\n            k = k.replace('+', '_')\n            if any([re.match(pattern, k) for pattern in patterns]):\n                filter_models[k] = v\n        if len(filter_models) == 0:\n            print('No model found, please specify models in:')\n            print('\\n'.join(models.keys()))\n            return\n        models = filter_models\n\n    summary_data = {}\n    tmpdir = tempfile.TemporaryDirectory()\n    for model_name, model_info in tqdm(models.items()):\n\n        if model_info.config is None:\n            continue\n\n        model_info.config = model_info.config.replace('%2B', '+')\n        config = Path(model_info.config)\n\n        try:\n            config.exists()\n        except:  # noqa 722\n            logger.error(f'{model_name}: {config} not found.')\n            continue\n\n        logger.info(f'Processing: {model_name}')\n\n        http_prefix = 'https://download.openmmlab.com/mmdetection/'\n        if 
args.checkpoint_root is not None:\n            root = args.checkpoint_root\n            if 's3://' in args.checkpoint_root:\n                from petrel_client.common.exception import AccessDeniedError\n                file_client = FileClient.infer_client(uri=root)\n                checkpoint = file_client.join_path(\n                    root, model_info.weights[len(http_prefix):])\n                try:\n                    exists = file_client.exists(checkpoint)\n                except AccessDeniedError:\n                    exists = False\n            else:\n                checkpoint = Path(root) / model_info.weights[len(http_prefix):]\n                exists = checkpoint.exists()\n            if exists:\n                checkpoint = str(checkpoint)\n            else:\n                print(f'WARNING: {model_name}: {checkpoint} not found.')\n                checkpoint = None\n        else:\n            checkpoint = None\n\n        try:\n            # build the model from a config file and a checkpoint file\n            result = inference(MMDET_ROOT / config, checkpoint, tmpdir.name,\n                               args, model_name)\n            result['valid'] = 'PASS'\n        except Exception:  # noqa 722\n            import traceback\n            logger.error(f'\"{config}\" :\\n{traceback.format_exc()}')\n            result = {'valid': 'FAIL'}\n\n        summary_data[model_name] = result\n\n    tmpdir.cleanup()\n    show_summary(summary_data, args)\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n"
  },
  {
    "path": ".dev_scripts/check_links.py",
    "content": "# Modified from:\n# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py\n\nimport argparse\nimport logging\nimport os\nimport pathlib\nimport re\nimport sys\nfrom multiprocessing.dummy import Pool\nfrom typing import NamedTuple, Optional, Tuple\n\nimport requests\nfrom mmengine.logging import MMLogger\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Goes through all the inline-links '\n        'in markdown files and reports the breakages')\n    parser.add_argument(\n        '--num-threads',\n        type=int,\n        default=100,\n        help='Number of processes to confirm the link')\n    parser.add_argument('--https-proxy', type=str, help='https proxy')\n    parser.add_argument(\n        '--out',\n        type=str,\n        default='link_reports.txt',\n        help='output path of reports')\n    args = parser.parse_args()\n    return args\n\n\nOK_STATUS_CODES = (\n    200,\n    401,  # the resource exists but may require some sort of login.\n    403,  # ^ same\n    405,  # HEAD method not allowed.\n    # the resource exists, but our default 'Accept-' header may not\n    # match what the server can provide.\n    406,\n)\n\n\nclass MatchTuple(NamedTuple):\n    source: str\n    name: str\n    link: str\n\n\ndef check_link(\n        match_tuple: MatchTuple,\n        http_session: requests.Session,\n        logger: logging = None) -> Tuple[MatchTuple, bool, Optional[str]]:\n    reason: Optional[str] = None\n    if match_tuple.link.startswith('http'):\n        result_ok, reason = check_url(match_tuple, http_session)\n    else:\n        result_ok = check_path(match_tuple)\n    if logger is None:\n        print(f\"  {'✓' if result_ok else '✗'} {match_tuple.link}\")\n    else:\n        logger.info(f\"  {'✓' if result_ok else '✗'} {match_tuple.link}\")\n    return match_tuple, result_ok, reason\n\n\ndef check_url(match_tuple: MatchTuple,\n              http_session: requests.Session) -> Tuple[bool, str]:\n    \"\"\"Check if a URL is reachable.\"\"\"\n    try:\n        result = http_session.head(\n            match_tuple.link, timeout=5, allow_redirects=True)\n        return (\n            result.ok or result.status_code in OK_STATUS_CODES,\n            f'status code = {result.status_code}',\n        )\n    except (requests.ConnectionError, requests.Timeout):\n        return False, 'connection error'\n\n\ndef check_path(match_tuple: MatchTuple) -> bool:\n    \"\"\"Check if a file in this repository exists.\"\"\"\n    relative_path = match_tuple.link.split('#')[0]\n    full_path = os.path.join(\n        os.path.dirname(str(match_tuple.source)), relative_path)\n    return os.path.exists(full_path)\n\n\ndef main():\n    args = parse_args()\n\n    # setup logger\n    logger = MMLogger.get_instance(name='mmdet', log_file=args.out)\n\n    # setup https_proxy\n    if args.https_proxy:\n        os.environ['https_proxy'] = args.https_proxy\n\n    # setup http_session\n    http_session = requests.Session()\n    for resource_prefix in ('http://', 'https://'):\n        http_session.mount(\n            resource_prefix,\n            requests.adapters.HTTPAdapter(\n                max_retries=5,\n                pool_connections=20,\n                pool_maxsize=args.num_threads),\n        )\n\n    logger.info('Finding all markdown files in the current directory...')\n\n    project_root = (pathlib.Path(__file__).parent / '..').resolve()\n    markdown_files = project_root.glob('**/*.md')\n\n    all_matches = set()\n    url_regex = 
re.compile(r'\\[([^!][^\\]]+)\\]\\(([^)(]+)\\)')\n    for markdown_file in markdown_files:\n        with open(markdown_file) as handle:\n            for line in handle.readlines():\n                matches = url_regex.findall(line)\n                for name, link in matches:\n                    if 'localhost' not in link:\n                        all_matches.add(\n                            MatchTuple(\n                                source=str(markdown_file),\n                                name=name,\n                                link=link))\n\n    logger.info(f'  {len(all_matches)} links found')\n    logger.info('Checking to make sure we can retrieve each link...')\n\n    with Pool(processes=args.num_threads) as pool:\n        results = pool.starmap(check_link, [(match, http_session, logger)\n                                            for match in list(all_matches)])\n\n    # collect unreachable results\n    unreachable_results = [(match_tuple, reason)\n                           for match_tuple, success, reason in results\n                           if not success]\n\n    if unreachable_results:\n        logger.info('================================================')\n        logger.info(f'Unreachable links ({len(unreachable_results)}):')\n        for match_tuple, reason in unreachable_results:\n            logger.info('  > Source: ' + match_tuple.source)\n            logger.info('    Name: ' + match_tuple.name)\n            logger.info('    Link: ' + match_tuple.link)\n            if reason is not None:\n                logger.info('    Reason: ' + reason)\n        sys.exit(1)\n    logger.info('No unreachable links found.')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": ".dev_scripts/convert_test_benchmark_script.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\nfrom mmengine import Config\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert benchmark model list to script')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('--port', type=int, default=29666, help='dist port')\n    parser.add_argument(\n        '--run', action='store_true', help='run script directly')\n    parser.add_argument(\n        '--out', type=str, help='path to save model benchmark script')\n\n    args = parser.parse_args()\n    return args\n\n\ndef process_model_info(model_info, work_dir):\n    config = model_info['config'].strip()\n    fname, _ = osp.splitext(osp.basename(config))\n    job_name = fname\n    work_dir = '$WORK_DIR/' + fname\n    checkpoint = model_info['checkpoint'].strip()\n    return dict(\n        config=config,\n        job_name=job_name,\n        work_dir=work_dir,\n        checkpoint=checkpoint)\n\n\ndef create_test_bash_info(commands, model_test_dict, port, script_name,\n                          partition):\n    config = model_test_dict['config']\n    job_name = model_test_dict['job_name']\n    checkpoint = model_test_dict['checkpoint']\n    work_dir = model_test_dict['work_dir']\n\n    echo_info = f' \\necho \\'{config}\\' &'\n    commands.append(echo_info)\n    commands.append('\\n')\n\n    command_info = f'GPUS=8  GPUS_PER_NODE=8  ' \\\n                   f'CPUS_PER_TASK=$CPUS_PER_TASK {script_name} '\n\n    command_info += f'{partition} '\n    command_info += f'{job_name} '\n    command_info += f'{config} '\n    command_info += f'$CHECKPOINT_DIR/{checkpoint} '\n    command_info += f'--work-dir {work_dir} '\n\n    command_info += f'--cfg-option env_cfg.dist_cfg.port={port} '\n    command_info += ' &'\n\n    commands.append(command_info)\n\n\ndef main():\n    args = parse_args()\n    if args.out:\n        out_suffix = args.out.split('.')[-1]\n        assert args.out.endswith('.sh'), \\\n            f'Expected out file path suffix is .sh, but got .{out_suffix}'\n    assert args.out or args.run, \\\n        ('Please specify at least one operation (save/run the '\n         'script) with the argument \"--out\" or \"--run\"')\n\n    commands = []\n    partition_name = 'PARTITION=$1 '\n    commands.append(partition_name)\n    commands.append('\\n')\n\n    checkpoint_root = 'CHECKPOINT_DIR=$2 '\n    commands.append(checkpoint_root)\n    commands.append('\\n')\n\n    work_dir = 'WORK_DIR=$3 '\n    commands.append(work_dir)\n    commands.append('\\n')\n\n    cpus_pre_task = 'CPUS_PER_TASK=${4:-2} '\n    commands.append(cpus_pre_task)\n    commands.append('\\n')\n\n    script_name = osp.join('tools', 'slurm_test.sh')\n    port = args.port\n\n    cfg = Config.fromfile(args.config)\n\n    for model_key in cfg:\n        model_infos = cfg[model_key]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            print('processing: ', model_info['config'])\n            model_test_dict = process_model_info(model_info, work_dir)\n            create_test_bash_info(commands, model_test_dict, port, script_name,\n                                  '$PARTITION')\n            port += 1\n\n    command_str = ''.join(commands)\n    if args.out:\n        with open(args.out, 'w') as f:\n            f.write(command_str)\n    if args.run:\n        os.system(command_str)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": ".dev_scripts/convert_train_benchmark_script.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert benchmark model json to script')\n    parser.add_argument(\n        'txt_path', type=str, help='txt path output by benchmark_filter')\n    parser.add_argument(\n        '--run', action='store_true', help='run script directly')\n    parser.add_argument(\n        '--out', type=str, help='path to save model benchmark script')\n\n    args = parser.parse_args()\n    return args\n\n\ndef determine_gpus(cfg_name):\n    gpus = 8\n    gpus_pre_node = 8\n\n    if cfg_name.find('16x') >= 0:\n        gpus = 16\n    elif cfg_name.find('4xb4') >= 0:\n        gpus = 4\n        gpus_pre_node = 4\n    elif 'lad' in cfg_name:\n        gpus = 2\n        gpus_pre_node = 2\n\n    return gpus, gpus_pre_node\n\n\ndef main():\n    args = parse_args()\n    if args.out:\n        out_suffix = args.out.split('.')[-1]\n        assert args.out.endswith('.sh'), \\\n            f'Expected out file path suffix is .sh, but got .{out_suffix}'\n    assert args.out or args.run, \\\n        ('Please specify at least one operation (save/run the '\n         'script) with the argument \"--out\" or \"--run\"')\n\n    root_name = './tools'\n    train_script_name = osp.join(root_name, 'slurm_train.sh')\n\n    commands = []\n    partition_name = 'PARTITION=$1 '\n    commands.append(partition_name)\n    commands.append('\\n')\n\n    work_dir = 'WORK_DIR=$2 '\n    commands.append(work_dir)\n    commands.append('\\n')\n\n    cpus_pre_task = 'CPUS_PER_TASK=${3:-4} '\n    commands.append(cpus_pre_task)\n    commands.append('\\n')\n    commands.append('\\n')\n\n    with open(args.txt_path, 'r') as f:\n        model_cfgs = f.readlines()\n        for i, cfg in enumerate(model_cfgs):\n            cfg = cfg.strip()\n            if len(cfg) == 0:\n                continue\n            # print cfg name\n            echo_info = f'echo \\'{cfg}\\' &'\n            commands.append(echo_info)\n            commands.append('\\n')\n\n            fname, _ = osp.splitext(osp.basename(cfg))\n            out_fname = '$WORK_DIR/' + fname\n\n            gpus, gpus_pre_node = determine_gpus(cfg)\n            command_info = f'GPUS={gpus}  GPUS_PER_NODE={gpus_pre_node}  ' \\\n                           f'CPUS_PER_TASK=$CPUS_PER_TASK {train_script_name} '\n            command_info += '$PARTITION '\n            command_info += f'{fname} '\n            command_info += f'{cfg} '\n            command_info += f'{out_fname} '\n\n            command_info += '--cfg-options default_hooks.checkpoint.' \\\n                            'max_keep_ckpts=1 '\n            command_info += '&'\n\n            commands.append(command_info)\n\n            if i < len(model_cfgs):\n                commands.append('\\n')\n\n        command_str = ''.join(commands)\n        if args.out:\n            with open(args.out, 'w') as f:\n                f.write(command_str)\n        if args.run:\n            os.system(command_str)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": ".dev_scripts/covignore.cfg",
    "content": "# Each line should be the relative path to the root directory\n# of this repo. Support regular expression as well.\n# For example:\n\n.*/__init__.py\n"
  },
  {
    "path": ".dev_scripts/diff_coverage_test.sh",
    "content": "#!/bin/bash\n\nreadarray -t IGNORED_FILES < $( dirname \"$0\" )/covignore.cfg\nREUSE_COVERAGE_REPORT=${REUSE_COVERAGE_REPORT:-0}\nREPO=${1:-\"origin\"}\nBRANCH=${2:-\"refactor_dev\"}\n\ngit fetch $REPO $BRANCH\n\nPY_FILES=\"\"\nfor FILE_NAME in $(git diff --name-only ${REPO}/${BRANCH}); do\n    # Only test python files in mmdet/ existing in current branch, and not ignored in covignore.cfg\n    if [ ${FILE_NAME: -3} == \".py\" ] && [ ${FILE_NAME:0:6} == \"mmdet/\" ] && [ -f \"$FILE_NAME\" ]; then\n        IGNORED=false\n        for IGNORED_FILE_NAME in \"${IGNORED_FILES[@]}\"; do\n            # Skip blank lines\n            if [ -z \"$IGNORED_FILE_NAME\" ]; then\n                continue\n            fi\n            if [ \"${IGNORED_FILE_NAME::1}\" != \"#\" ] && [[ \"$FILE_NAME\" =~ $IGNORED_FILE_NAME ]]; then\n                echo \"Ignoring $FILE_NAME\"\n                IGNORED=true\n                break\n            fi\n        done\n        if [ \"$IGNORED\" = false ]; then\n            PY_FILES=\"$PY_FILES $FILE_NAME\"\n        fi\n    fi\ndone\n\n# Only test the coverage when PY_FILES are not empty, otherwise they will test the entire project\nif [ ! -z \"${PY_FILES}\" ]\nthen\n    if [ \"$REUSE_COVERAGE_REPORT\" == \"0\" ]; then\n        coverage run --branch --source mmdet -m pytest tests/\n    fi\n    coverage report --fail-under 80 -m $PY_FILES\n    interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex \"__repr__\" --fail-under 95 $PY_FILES\nfi\n"
  },
  {
    "path": ".dev_scripts/download_checkpoints.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nimport argparse\nimport math\nimport os\nimport os.path as osp\nfrom multiprocessing import Pool\n\nimport torch\nfrom mmengine.config import Config\nfrom mmengine.utils import mkdir_or_exist\n\n\ndef download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):\n    # math.pow(1024, 2) is mean 1 MB\n    assert_msg = f\"Downloaded url '{url}' does not exist \" \\\n                 f'or size is < min_bytes={min_bytes}'\n    try:\n        print(f'Downloading {url} to {out_file}...')\n        torch.hub.download_url_to_file(url, str(out_file), progress=progress)\n        assert osp.exists(\n            out_file) and osp.getsize(out_file) > min_bytes, assert_msg\n    except Exception as e:\n        if osp.exists(out_file):\n            os.remove(out_file)\n        print(f'ERROR: {e}\\nRe-attempting {url} to {out_file} ...')\n        os.system(f\"curl -L '{url}' -o '{out_file}' --retry 3 -C -\"\n                  )  # curl download, retry and resume on fail\n    finally:\n        if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:\n            os.remove(out_file)  # remove partial downloads\n\n        if not osp.exists(out_file):\n            print(f'ERROR: {assert_msg}\\n')\n        print('=========================================\\n')\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Download checkpoints')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument(\n        'out', type=str, help='output dir of checkpoints to be stored')\n    parser.add_argument(\n        '--nproc', type=int, default=16, help='num of Processes')\n    parser.add_argument(\n        '--intranet',\n        action='store_true',\n        help='switch to internal network url')\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    mkdir_or_exist(args.out)\n\n    cfg = Config.fromfile(args.config)\n\n    checkpoint_url_list = []\n    checkpoint_out_list = []\n\n    for model in cfg:\n        model_infos = cfg[model]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            checkpoint = model_info['checkpoint']\n            out_file = osp.join(args.out, checkpoint)\n            if not osp.exists(out_file):\n\n                url = model_info['url']\n                if args.intranet is True:\n                    url = url.replace('.com', '.sensetime.com')\n                    url = url.replace('https', 'http')\n\n                checkpoint_url_list.append(url)\n                checkpoint_out_list.append(out_file)\n\n    if len(checkpoint_url_list) > 0:\n        pool = Pool(min(os.cpu_count(), args.nproc))\n        pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))\n    else:\n        print('No files to download!')\n"
  },
  {
    "path": ".dev_scripts/gather_models.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport json\nimport os.path as osp\nimport shutil\nimport subprocess\nfrom collections import OrderedDict\n\nimport torch\nimport yaml\nfrom mmengine.config import Config\nfrom mmengine.fileio import dump\nfrom mmengine.utils import mkdir_or_exist, scandir\n\n\ndef ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):\n\n    class OrderedDumper(Dumper):\n        pass\n\n    def _dict_representer(dumper, data):\n        return dumper.represent_mapping(\n            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())\n\n    OrderedDumper.add_representer(OrderedDict, _dict_representer)\n    return yaml.dump(data, stream, OrderedDumper, **kwds)\n\n\ndef process_checkpoint(in_file, out_file):\n    checkpoint = torch.load(in_file, map_location='cpu')\n    # remove optimizer for smaller file size\n    if 'optimizer' in checkpoint:\n        del checkpoint['optimizer']\n\n    # remove ema state_dict\n    for key in list(checkpoint['state_dict']):\n        if key.startswith('ema_'):\n            checkpoint['state_dict'].pop(key)\n\n    # if it is necessary to remove some sensitive data in checkpoint['meta'],\n    # add the code here.\n    if torch.__version__ >= '1.6':\n        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)\n    else:\n        torch.save(checkpoint, out_file)\n    sha = subprocess.check_output(['sha256sum', out_file]).decode()\n    final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])\n    subprocess.Popen(['mv', out_file, final_file])\n    return final_file\n\n\ndef is_by_epoch(config):\n    cfg = Config.fromfile('./configs/' + config)\n    return cfg.runner.type == 'EpochBasedRunner'\n\n\ndef get_final_epoch_or_iter(config):\n    cfg = Config.fromfile('./configs/' + config)\n    if cfg.runner.type == 'EpochBasedRunner':\n        return cfg.runner.max_epochs\n    else:\n        return cfg.runner.max_iters\n\n\ndef get_best_epoch_or_iter(exp_dir):\n    best_epoch_iter_full_path = list(\n        sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]\n    best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]\n    best_epoch_or_iter = best_epoch_or_iter_model_path.\\\n        split('_')[-1].split('.')[0]\n    return best_epoch_or_iter_model_path, int(best_epoch_or_iter)\n\n\ndef get_real_epoch_or_iter(config):\n    cfg = Config.fromfile('./configs/' + config)\n    if cfg.runner.type == 'EpochBasedRunner':\n        epoch = cfg.runner.max_epochs\n        if cfg.data.train.type == 'RepeatDataset':\n            epoch *= cfg.data.train.times\n        return epoch\n    else:\n        return cfg.runner.max_iters\n\n\ndef get_final_results(log_json_path,\n                      epoch_or_iter,\n                      results_lut,\n                      by_epoch=True):\n    result_dict = dict()\n    last_val_line = None\n    last_train_line = None\n    last_val_line_idx = -1\n    last_train_line_idx = -1\n    with open(log_json_path, 'r') as f:\n        for i, line in enumerate(f.readlines()):\n            log_line = json.loads(line)\n            if 'mode' not in log_line.keys():\n                continue\n\n            if by_epoch:\n                if (log_line['mode'] == 'train'\n                        and log_line['epoch'] == epoch_or_iter):\n                    result_dict['memory'] = log_line['memory']\n\n                if (log_line['mode'] == 'val'\n                        and log_line['epoch'] == epoch_or_iter):\n   
                 result_dict.update({\n                        key: log_line[key]\n                        for key in results_lut if key in log_line\n                    })\n                    return result_dict\n            else:\n                if log_line['mode'] == 'train':\n                    last_train_line_idx = i\n                    last_train_line = log_line\n\n                if log_line and log_line['mode'] == 'val':\n                    last_val_line_idx = i\n                    last_val_line = log_line\n\n    # bug: max_iters = 768, last_train_line['iter'] = 750\n    assert last_val_line_idx == last_train_line_idx + 1, \\\n        'Log file is incomplete'\n    result_dict['memory'] = last_train_line['memory']\n    result_dict.update({\n        key: last_val_line[key]\n        for key in results_lut if key in last_val_line\n    })\n\n    return result_dict\n\n\ndef get_dataset_name(config):\n    # If there are more dataset, add here.\n    name_map = dict(\n        CityscapesDataset='Cityscapes',\n        CocoDataset='COCO',\n        CocoPanopticDataset='COCO',\n        DeepFashionDataset='Deep Fashion',\n        LVISV05Dataset='LVIS v0.5',\n        LVISV1Dataset='LVIS v1',\n        VOCDataset='Pascal VOC',\n        WIDERFaceDataset='WIDER Face',\n        OpenImagesDataset='OpenImagesDataset',\n        OpenImagesChallengeDataset='OpenImagesChallengeDataset',\n        Objects365V1Dataset='Objects365 v1',\n        Objects365V2Dataset='Objects365 v2')\n    cfg = Config.fromfile('./configs/' + config)\n    return name_map[cfg.dataset_type]\n\n\ndef convert_model_info_to_pwc(model_infos):\n    pwc_files = {}\n    for model in model_infos:\n        cfg_folder_name = osp.split(model['config'])[-2]\n        pwc_model_info = OrderedDict()\n        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]\n        pwc_model_info['In Collection'] = 'Please fill in Collection name'\n        pwc_model_info['Config'] = osp.join('configs', model['config'])\n\n        # get metadata\n        memory = round(model['results']['memory'] / 1024, 1)\n        meta_data = OrderedDict()\n        meta_data['Training Memory (GB)'] = memory\n        if 'epochs' in model:\n            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])\n        else:\n            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])\n        pwc_model_info['Metadata'] = meta_data\n\n        # get dataset name\n        dataset_name = get_dataset_name(model['config'])\n\n        # get results\n        results = []\n        # if there are more metrics, add here.\n        if 'bbox_mAP' in model['results']:\n            metric = round(model['results']['bbox_mAP'] * 100, 1)\n            results.append(\n                OrderedDict(\n                    Task='Object Detection',\n                    Dataset=dataset_name,\n                    Metrics={'box AP': metric}))\n        if 'segm_mAP' in model['results']:\n            metric = round(model['results']['segm_mAP'] * 100, 1)\n            results.append(\n                OrderedDict(\n                    Task='Instance Segmentation',\n                    Dataset=dataset_name,\n                    Metrics={'mask AP': metric}))\n        if 'PQ' in model['results']:\n            metric = round(model['results']['PQ'], 1)\n            results.append(\n                OrderedDict(\n                    Task='Panoptic Segmentation',\n                    Dataset=dataset_name,\n                    Metrics={'PQ': metric}))\n        
pwc_model_info['Results'] = results\n\n        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'\n        link_string += '{}/{}'.format(model['config'].rstrip('.py'),\n                                      osp.split(model['model_path'])[-1])\n        pwc_model_info['Weights'] = link_string\n        if cfg_folder_name in pwc_files:\n            pwc_files[cfg_folder_name].append(pwc_model_info)\n        else:\n            pwc_files[cfg_folder_name] = [pwc_model_info]\n    return pwc_files\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Gather benchmarked models')\n    parser.add_argument(\n        'root',\n        type=str,\n        help='root path of benchmarked models to be gathered')\n    parser.add_argument(\n        'out', type=str, help='output path of gathered models to be stored')\n    parser.add_argument(\n        '--best',\n        action='store_true',\n        help='whether to gather the best model.')\n\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    models_root = args.root\n    models_out = args.out\n    mkdir_or_exist(models_out)\n\n    # find all models in the root directory to be gathered\n    raw_configs = list(scandir('./configs', '.py', recursive=True))\n\n    # filter configs that is not trained in the experiments dir\n    used_configs = []\n    for raw_config in raw_configs:\n        if osp.exists(osp.join(models_root, raw_config)):\n            used_configs.append(raw_config)\n    print(f'Find {len(used_configs)} models to be gathered')\n\n    # find final_ckpt and log file for trained each config\n    # and parse the best performance\n    model_infos = []\n    for used_config in used_configs:\n        exp_dir = osp.join(models_root, used_config)\n        by_epoch = is_by_epoch(used_config)\n        # check whether the exps is finished\n        if args.best is True:\n            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)\n        else:\n            final_epoch_or_iter = get_final_epoch_or_iter(used_config)\n            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',\n                                             final_epoch_or_iter)\n\n        model_path = osp.join(exp_dir, final_model)\n        # skip if the model is still training\n        if not osp.exists(model_path):\n            continue\n\n        # get the latest logs\n        log_json_path = list(\n            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]\n        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]\n        cfg = Config.fromfile('./configs/' + used_config)\n        results_lut = cfg.evaluation.metric\n        if not isinstance(results_lut, list):\n            results_lut = [results_lut]\n        # case when using VOC, the evaluation key is only 'mAP'\n        # when using Panoptic Dataset, the evaluation key is 'PQ'.\n        for i, key in enumerate(results_lut):\n            if 'mAP' not in key and 'PQ' not in key:\n                results_lut[i] = key + '_mAP'\n        model_performance = get_final_results(log_json_path,\n                                              final_epoch_or_iter, results_lut,\n                                              by_epoch)\n\n        if model_performance is None:\n            continue\n\n        model_time = osp.split(log_txt_path)[-1].split('.')[0]\n        model_info = dict(\n            config=used_config,\n            results=model_performance,\n            model_time=model_time,\n            
final_model=final_model,\n            log_json_path=osp.split(log_json_path)[-1])\n        model_info['epochs' if by_epoch else 'iterations'] =\\\n            final_epoch_or_iter\n        model_infos.append(model_info)\n\n    # publish model for each checkpoint\n    publish_model_infos = []\n    for model in model_infos:\n        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))\n        mkdir_or_exist(model_publish_dir)\n\n        model_name = osp.split(model['config'])[-1].split('.')[0]\n\n        model_name += '_' + model['model_time']\n        publish_model_path = osp.join(model_publish_dir, model_name)\n        trained_model_path = osp.join(models_root, model['config'],\n                                      model['final_model'])\n\n        # convert model\n        final_model_path = process_checkpoint(trained_model_path,\n                                              publish_model_path)\n\n        # copy log\n        shutil.copy(\n            osp.join(models_root, model['config'], model['log_json_path']),\n            osp.join(model_publish_dir, f'{model_name}.log.json'))\n        shutil.copy(\n            osp.join(models_root, model['config'],\n                     model['log_json_path'].rstrip('.json')),\n            osp.join(model_publish_dir, f'{model_name}.log'))\n\n        # copy config to guarantee reproducibility\n        config_path = model['config']\n        config_path = osp.join(\n            'configs',\n            config_path) if 'configs' not in config_path else config_path\n        target_config_path = osp.split(config_path)[-1]\n        shutil.copy(config_path, osp.join(model_publish_dir,\n                                          target_config_path))\n\n        model['model_path'] = final_model_path\n        publish_model_infos.append(model)\n\n    models = dict(models=publish_model_infos)\n    print(f'Totally gathered {len(publish_model_infos)} models')\n    dump(models, osp.join(models_out, 'model_info.json'))\n\n    pwc_files = convert_model_info_to_pwc(publish_model_infos)\n    for name in pwc_files:\n        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:\n            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": ".dev_scripts/gather_test_benchmark_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport os.path as osp\n\nfrom mmengine.config import Config\nfrom mmengine.fileio import dump, load\nfrom mmengine.utils import mkdir_or_exist\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Gather benchmarked models metric')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument(\n        'root',\n        type=str,\n        help='root path of benchmarked models to be gathered')\n    parser.add_argument(\n        '--out', type=str, help='output path of gathered metrics to be stored')\n    parser.add_argument(\n        '--not-show', action='store_true', help='not show metrics')\n    parser.add_argument(\n        '--show-all', action='store_true', help='show all model metrics')\n\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == '__main__':\n    args = parse_args()\n\n    root_path = args.root\n    metrics_out = args.out\n    result_dict = {}\n\n    cfg = Config.fromfile(args.config)\n\n    for model_key in cfg:\n        model_infos = cfg[model_key]\n        if not isinstance(model_infos, list):\n            model_infos = [model_infos]\n        for model_info in model_infos:\n            record_metrics = model_info['metric']\n            config = model_info['config'].strip()\n            fname, _ = osp.splitext(osp.basename(config))\n            metric_json_dir = osp.join(root_path, fname)\n            if osp.exists(metric_json_dir):\n                json_list = glob.glob(osp.join(metric_json_dir, '*.json'))\n                if len(json_list) > 0:\n                    log_json_path = list(sorted(json_list))[-1]\n\n                    metric = load(log_json_path)\n                    if config in metric.get('config', {}):\n\n                        new_metrics = dict()\n                        for record_metric_key in record_metrics:\n                            record_metric_key_bk = record_metric_key\n                            old_metric = record_metrics[record_metric_key]\n                            if record_metric_key == 'AR_1000':\n                                record_metric_key = 'AR@1000'\n                            if record_metric_key not in metric['metric']:\n                                raise KeyError(\n                                    'record_metric_key not exist, please '\n                                    'check your config')\n                            new_metric = round(\n                                metric['metric'][record_metric_key] * 100, 1)\n                            new_metrics[record_metric_key_bk] = new_metric\n\n                        if args.show_all:\n                            result_dict[config] = dict(\n                                before=record_metrics, after=new_metrics)\n                        else:\n                            for record_metric_key in record_metrics:\n                                old_metric = record_metrics[record_metric_key]\n                                new_metric = new_metrics[record_metric_key]\n                                if old_metric != new_metric:\n                                    result_dict[config] = dict(\n                                        before=record_metrics,\n                                        after=new_metrics)\n                                    break\n                    else:\n                        print(f'{config} not included in: {log_json_path}')\n                else:\n                 
   print(f'{config}: no json file in {metric_json_dir}')\n            else:\n                print(f'{config}: dir not found: {metric_json_dir}')\n\n    if metrics_out:\n        mkdir_or_exist(metrics_out)\n        dump(result_dict, osp.join(metrics_out, 'batch_test_metric_info.json'))\n    if not args.not_show:\n        print('===================================')\n        for config_name, metrics in result_dict.items():\n            print(config_name, metrics)\n        print('===================================')\n"
  },
  {
    "path": ".dev_scripts/gather_train_benchmark_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport os.path as osp\n\nfrom gather_models import get_final_results\nfrom mmengine.config import Config\nfrom mmengine.fileio import dump\nfrom mmengine.utils import mkdir_or_exist\n\ntry:\n    import xlrd\nexcept ImportError:\n    xlrd = None\ntry:\n    import xlutils\n    from xlutils.copy import copy\nexcept ImportError:\n    xlutils = None\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Gather benchmarked models metric')\n    parser.add_argument(\n        'root',\n        type=str,\n        help='root path of benchmarked models to be gathered')\n    parser.add_argument(\n        'txt_path', type=str, help='txt path output by benchmark_filter')\n    parser.add_argument(\n        '--out', type=str, help='output path of gathered metrics to be stored')\n    parser.add_argument(\n        '--not-show', action='store_true', help='not show metrics')\n    parser.add_argument(\n        '--excel', type=str, help='input path of excel to be recorded')\n    parser.add_argument(\n        '--ncol', type=int, help='Number of column to be modified or appended')\n\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == '__main__':\n    args = parse_args()\n\n    if args.excel:\n        assert args.ncol, 'Please specify \"--excel\" and \"--ncol\" ' \\\n                          'at the same time'\n        if xlrd is None:\n            raise RuntimeError(\n                'xlrd is not installed,'\n                'Please use “pip install xlrd==1.2.0” to install')\n        if xlutils is None:\n            raise RuntimeError(\n                'xlutils is not installed,'\n                'Please use “pip install xlutils==2.0.0” to install')\n        readbook = xlrd.open_workbook(args.excel)\n        sheet = readbook.sheet_by_name('Sheet1')\n        sheet_info = {}\n        total_nrows = sheet.nrows\n        for i in range(3, sheet.nrows):\n            sheet_info[sheet.row_values(i)[0]] = i\n        xlrw = copy(readbook)\n        table = xlrw.get_sheet(0)\n\n    root_path = args.root\n    metrics_out = args.out\n\n    result_dict = {}\n    with open(args.txt_path, 'r') as f:\n        model_cfgs = f.readlines()\n        for i, config in enumerate(model_cfgs):\n            config = config.strip()\n            if len(config) == 0:\n                continue\n\n            config_name = osp.split(config)[-1]\n            config_name = osp.splitext(config_name)[0]\n            result_path = osp.join(root_path, config_name)\n            if osp.exists(result_path):\n                # 1 read config\n                cfg = Config.fromfile(config)\n                total_epochs = cfg.runner.max_epochs\n                final_results = cfg.evaluation.metric\n                if not isinstance(final_results, list):\n                    final_results = [final_results]\n                final_results_out = []\n                for key in final_results:\n                    if 'proposal_fast' in key:\n                        final_results_out.append('AR@1000')  # RPN\n                    elif 'mAP' not in key:\n                        final_results_out.append(key + '_mAP')\n\n                # 2 determine whether total_epochs ckpt exists\n                ckpt_path = f'epoch_{total_epochs}.pth'\n                if osp.exists(osp.join(result_path, ckpt_path)):\n                    log_json_path = list(\n                        sorted(glob.glob(osp.join(result_path,\n                          
                        '*.log.json'))))[-1]\n\n                    # 3 read metric\n                    model_performance = get_final_results(\n                        log_json_path, total_epochs, final_results_out)\n                    if model_performance is None:\n                        print(f'log file error: {log_json_path}')\n                        continue\n                    for performance in model_performance:\n                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:\n                            metric = round(\n                                model_performance[performance] * 100, 1)\n                            model_performance[performance] = metric\n                    result_dict[config] = model_performance\n\n                    # update and append excel content\n                    if args.excel:\n                        if 'AR@1000' in model_performance:\n                            metrics = f'{model_performance[\"AR@1000\"]}' \\\n                                      f'(AR@1000)'\n                        elif 'segm_mAP' in model_performance:\n                            metrics = f'{model_performance[\"bbox_mAP\"]}/' \\\n                                      f'{model_performance[\"segm_mAP\"]}'\n                        else:\n                            metrics = f'{model_performance[\"bbox_mAP\"]}'\n\n                        row_num = sheet_info.get(config, None)\n                        if row_num:\n                            table.write(row_num, args.ncol, metrics)\n                        else:\n                            table.write(total_nrows, 0, config)\n                            table.write(total_nrows, args.ncol, metrics)\n                            total_nrows += 1\n\n                else:\n                    print(f'{config} not exist: {ckpt_path}')\n            else:\n                print(f'not exist: {config}')\n\n        # 4 save or print results\n        if metrics_out:\n            mkdir_or_exist(metrics_out)\n            dump(result_dict, osp.join(metrics_out, 'model_metric_info.json'))\n        if not args.not_show:\n            print('===================================')\n            for config_name, metrics in result_dict.items():\n                print(config_name, metrics)\n            print('===================================')\n        if args.excel:\n            filename, sufflx = osp.splitext(args.excel)\n            xlrw.save(f'{filename}_o{sufflx}')\n            print(f'>>> Output {filename}_o{sufflx}')\n"
  },
  {
    "path": ".dev_scripts/linter.sh",
    "content": "yapf -r -i mmdet/ configs/ tests/ tools/\nisort -rc mmdet/ configs/ tests/ tools/\nflake8 .\n"
  },
  {
    "path": ".dev_scripts/test_benchmark.sh",
    "content": "PARTITION=$1\nCHECKPOINT_DIR=$2\nWORK_DIR=$3\nCPUS_PER_TASK=${4:-2}\n\necho 'configs/atss/atss_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir $WORK_DIR/atss_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29666  &\necho 'configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION autoassign_r50-caffe_fpn_1x_coco configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir $WORK_DIR/autoassign_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29667  &\necho 'configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn-carafe_1x_coco configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn-carafe_1x_coco --cfg-option env_cfg.dist_cfg.port=29668  &\necho 'configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir $WORK_DIR/cascade-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29669  &\necho 'configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-mask-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir $WORK_DIR/cascade-mask-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29670  &\necho 'configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir $WORK_DIR/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29671  &\necho 'configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION centernet_r18-dcnv2_8xb16-crop512-140e_coco configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir $WORK_DIR/centernet_r18-dcnv2_8xb16-crop512-140e_coco --cfg-option env_cfg.dist_cfg.port=29672  &\necho 'configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir $WORK_DIR/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco --cfg-option env_cfg.dist_cfg.port=29673  &\necho 
'configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth --work-dir $WORK_DIR/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco --cfg-option env_cfg.dist_cfg.port=29674  &\necho 'configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cornernet_hourglass104_8xb6-210e-mstest_coco configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir $WORK_DIR/cornernet_hourglass104_8xb6-210e-mstest_coco --cfg-option env_cfg.dist_cfg.port=29675  &\necho 'configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir $WORK_DIR/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29676  &\necho 'configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_mdpool_1x_coco configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_mdpool_1x_coco --cfg-option env_cfg.dist_cfg.port=29677  &\necho 'configs/ddod/ddod_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ddod_r50_fpn_1x_coco configs/ddod/ddod_r50_fpn_1x_coco.py $CHECKPOINT_DIR/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth --work-dir $WORK_DIR/ddod_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29678  &\necho 'configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION deformable-detr_r50_16xb2-50e_coco configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir $WORK_DIR/deformable-detr_r50_16xb2-50e_coco --cfg-option env_cfg.dist_cfg.port=29679  &\necho 'configs/detectors/detectors_htc-r50_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION detectors_htc-r50_1x_coco configs/detectors/detectors_htc-r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir $WORK_DIR/detectors_htc-r50_1x_coco --cfg-option env_cfg.dist_cfg.port=29680  &\necho 'configs/detr/detr_r50_8xb2-150e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION detr_r50_8xb2-150e_coco configs/detr/detr_r50_8xb2-150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir $WORK_DIR/detr_r50_8xb2-150e_coco --cfg-option env_cfg.dist_cfg.port=29681  &\necho 'configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh 
$PARTITION dh-faster-rcnn_r50_fpn_1x_coco configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir $WORK_DIR/dh-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29682  &\necho 'configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION atss_r50_fpn_dyhead_1x_coco configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth --work-dir $WORK_DIR/atss_r50_fpn_dyhead_1x_coco --cfg-option env_cfg.dist_cfg.port=29683  &\necho 'configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION dynamic-rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir $WORK_DIR/dynamic-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29684  &\necho 'configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_effb3_fpn_8xb4-crop896-1x_coco configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py $CHECKPOINT_DIR/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth --work-dir $WORK_DIR/retinanet_effb3_fpn_8xb4-crop896-1x_coco --cfg-option env_cfg.dist_cfg.port=29685  &\necho 'configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50-attn1111_fpn_1x_coco configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir $WORK_DIR/faster-rcnn_r50-attn1111_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29686  &\necho 'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29687  &\necho 'configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir $WORK_DIR/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco --cfg-option env_cfg.dist_cfg.port=29688  &\necho 'configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fovea_r50_fpn_gn-head-align_4xb4-2x_coco configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir $WORK_DIR/fovea_r50_fpn_gn-head-align_4xb4-2x_coco --cfg-option env_cfg.dist_cfg.port=29689  &\necho 'configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpg_crop640-50e_coco 
configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpg_crop640-50e_coco --cfg-option env_cfg.dist_cfg.port=29690  &\necho 'configs/free_anchor/freeanchor_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION freeanchor_r50_fpn_1x_coco configs/free_anchor/freeanchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir $WORK_DIR/freeanchor_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29691  &\necho 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir $WORK_DIR/fsaf_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29692  &\necho 'configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir $WORK_DIR/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29693  &\necho 'configs/gfl/gfl_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir $WORK_DIR/gfl_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29694  &\necho 'configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_fpn_ghm-1x_coco configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py $CHECKPOINT_DIR/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth --work-dir $WORK_DIR/retinanet_r50_fpn_ghm-1x_coco --cfg-option env_cfg.dist_cfg.port=29695  &\necho 'configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_gn-all_2x_coco --cfg-option env_cfg.dist_cfg.port=29696  &\necho 'configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_gn-ws-all_1x_coco configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_gn-ws-all_1x_coco --cfg-option env_cfg.dist_cfg.port=29697  &\necho 'configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION grid-rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir $WORK_DIR/grid-rcnn_r50_fpn_gn-head_2x_coco --cfg-option env_cfg.dist_cfg.port=29698  &\necho 'configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faste-rcnn_r50_fpn_groie_1x_coco 
configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir $WORK_DIR/faste-rcnn_r50_fpn_groie_1x_coco --cfg-option env_cfg.dist_cfg.port=29699  &\necho 'configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ga-retinanet_r50-caffe_fpn_1x_coco configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir $WORK_DIR/ga-retinanet_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29700  &\necho 'configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_hrnetv2p-w18-1x_coco configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir $WORK_DIR/faster-rcnn_hrnetv2p-w18-1x_coco --cfg-option env_cfg.dist_cfg.port=29701  &\necho 'configs/htc/htc_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir $WORK_DIR/htc_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29702  &\necho 'configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_instaboost-4x_coco configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_instaboost-4x_coco --cfg-option env_cfg.dist_cfg.port=29703  &\necho 'configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION libra-faster-rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir $WORK_DIR/libra-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29704  &\necho 'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask2former_r50_8xb2-lsj-50e_coco-panoptic configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py $CHECKPOINT_DIR/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth --work-dir $WORK_DIR/mask2former_r50_8xb2-lsj-50e_coco-panoptic --cfg-option env_cfg.dist_cfg.port=29705  &\necho 'configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29706  &\necho 'configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION maskformer_r50_ms-16xb1-75e_coco configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py $CHECKPOINT_DIR/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth --work-dir $WORK_DIR/maskformer_r50_ms-16xb1-75e_coco --cfg-option env_cfg.dist_cfg.port=29707  &\necho 'configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  
GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ms-rcnn_r50-caffe_fpn_1x_coco configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir $WORK_DIR/ms-rcnn_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29708  &\necho 'configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir $WORK_DIR/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco --cfg-option env_cfg.dist_cfg.port=29709  &\necho 'configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640-50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir $WORK_DIR/retinanet_r50_nasfpn_crop640-50e_coco --cfg-option env_cfg.dist_cfg.port=29710  &\necho 'configs/paa/paa_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir $WORK_DIR/paa_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29711  &\necho 'configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_pafpn_1x_coco configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir $WORK_DIR/faster-rcnn_r50_pafpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29712  &\necho 'configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION panoptic-fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth --work-dir $WORK_DIR/panoptic-fpn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29713  &\necho 'configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_pisa_1x_coco configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_pisa_1x_coco --cfg-option env_cfg.dist_cfg.port=29714  &\necho 'configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION point-rend_r50-caffe_fpn_ms-1x_coco configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir $WORK_DIR/point-rend_r50-caffe_fpn_ms-1x_coco --cfg-option env_cfg.dist_cfg.port=29715  &\necho 'configs/pvt/retinanet_pvt-s_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_pvt-s_fpn_1x_coco configs/pvt/retinanet_pvt-s_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth --work-dir $WORK_DIR/retinanet_pvt-s_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29716  
&\necho 'configs/queryinst/queryinst_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION queryinst_r50_fpn_1x_coco configs/queryinst/queryinst_r50_fpn_1x_coco.py $CHECKPOINT_DIR/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth --work-dir $WORK_DIR/queryinst_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29717  &\necho 'configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir $WORK_DIR/mask-rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29718  &\necho 'configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION reppoints-moment_r50_fpn_1x_coco configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir $WORK_DIR/reppoints-moment_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29719  &\necho 'configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_res2net-101_fpn_2x_coco configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir $WORK_DIR/faster-rcnn_res2net-101_fpn_2x_coco --cfg-option env_cfg.dist_cfg.port=29720  &\necho 'configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir $WORK_DIR/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco --cfg-option env_cfg.dist_cfg.port=29721  &\necho 'configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-rsb-pre_fpn_1x_coco configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth --work-dir $WORK_DIR/mask-rcnn_r50-rsb-pre_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29722  &\necho 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir $WORK_DIR/retinanet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29723  &\necho 'configs/rpn/rpn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir $WORK_DIR/rpn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29724  &\necho 'configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sabl-retinanet_r50_fpn_1x_coco configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py 
$CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir $WORK_DIR/sabl-retinanet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29725  &\necho 'configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sabl-faster-rcnn_r50_fpn_1x_coco configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir $WORK_DIR/sabl-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29726  &\necho 'configs/scnet/scnet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir $WORK_DIR/scnet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29727  &\necho 'configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-scratch_fpn_gn-all_6x_coco configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py $CHECKPOINT_DIR/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth --work-dir $WORK_DIR/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco --cfg-option env_cfg.dist_cfg.port=29728  &\necho 'configs/solo/decoupled-solo_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION decoupled-solo_r50_fpn_1x_coco configs/solo/decoupled-solo_r50_fpn_1x_coco.py $CHECKPOINT_DIR/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth --work-dir $WORK_DIR/decoupled-solo_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29729  &\necho 'configs/solov2/solov2_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION solov2_r50_fpn_1x_coco configs/solov2/solov2_r50_fpn_1x_coco.py $CHECKPOINT_DIR/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth --work-dir $WORK_DIR/solov2_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29730  &\necho 'configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sparse-rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir $WORK_DIR/sparse-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29731  &\necho 'configs/ssd/ssd300_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir $WORK_DIR/ssd300_coco --cfg-option env_cfg.dist_cfg.port=29732  &\necho 'configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2-scratch_8xb24-600e_coco configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir $WORK_DIR/ssdlite_mobilenetv2-scratch_8xb24-600e_coco --cfg-option env_cfg.dist_cfg.port=29733  &\necho 'configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_swin-t-p4-w7_fpn_1x_coco configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth 
--work-dir $WORK_DIR/mask-rcnn_swin-t-p4-w7_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29734  &\necho 'configs/tood/tood_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION tood_r50_fpn_1x_coco configs/tood/tood_r50_fpn_1x_coco.py $CHECKPOINT_DIR/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth --work-dir $WORK_DIR/tood_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29735  &\necho 'configs/tridentnet/tridentnet_r50-caffe_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION tridentnet_r50-caffe_1x_coco configs/tridentnet/tridentnet_r50-caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir $WORK_DIR/tridentnet_r50-caffe_1x_coco --cfg-option env_cfg.dist_cfg.port=29736  &\necho 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir $WORK_DIR/vfnet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29737  &\necho 'configs/yolact/yolact_r50_1xb8-55e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolact_r50_1xb8-55e_coco configs/yolact/yolact_r50_1xb8-55e_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir $WORK_DIR/yolact_r50_1xb8-55e_coco --cfg-option env_cfg.dist_cfg.port=29738  &\necho 'configs/yolo/yolov3_d53_8xb8-320-273e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolov3_d53_8xb8-320-273e_coco configs/yolo/yolov3_d53_8xb8-320-273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir $WORK_DIR/yolov3_d53_8xb8-320-273e_coco --cfg-option env_cfg.dist_cfg.port=29739  &\necho 'configs/yolof/yolof_r50-c5_8xb8-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolof_r50-c5_8xb8-1x_coco configs/yolof/yolof_r50-c5_8xb8-1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir $WORK_DIR/yolof_r50-c5_8xb8-1x_coco --cfg-option env_cfg.dist_cfg.port=29740  &\necho 'configs/yolox/yolox_tiny_8xb8-300e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolox_tiny_8xb8-300e_coco configs/yolox/yolox_tiny_8xb8-300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth --work-dir $WORK_DIR/yolox_tiny_8xb8-300e_coco --cfg-option env_cfg.dist_cfg.port=29741  &\n"
  },
  {
    "path": ".dev_scripts/test_init_backbone.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Check out backbone whether successfully load pretrained checkpoint.\"\"\"\nimport copy\nimport os\nfrom os.path import dirname, exists, join\n\nimport pytest\nfrom mmengine.config import Config\nfrom mmengine.runner import CheckpointLoader\nfrom mmengine.utils import ProgressBar\n\nfrom mmdet.registry import MODELS\n\n\ndef _get_config_directory():\n    \"\"\"Find the predefined detector config directory.\"\"\"\n    try:\n        # Assume we are running in the source mmdetection repo\n        repo_dpath = dirname(dirname(__file__))\n    except NameError:\n        # For IPython development when this __file__ is not defined\n        import mmdet\n        repo_dpath = dirname(dirname(mmdet.__file__))\n    config_dpath = join(repo_dpath, 'configs')\n    if not exists(config_dpath):\n        raise Exception('Cannot find config path')\n    return config_dpath\n\n\ndef _get_config_module(fname):\n    \"\"\"Load a configuration as a python module.\"\"\"\n    config_dpath = _get_config_directory()\n    config_fpath = join(config_dpath, fname)\n    config_mod = Config.fromfile(config_fpath)\n    return config_mod\n\n\ndef _get_detector_cfg(fname):\n    \"\"\"Grab configs necessary to create a detector.\n\n    These are deep copied to allow for safe modification of parameters without\n    influencing other tests.\n    \"\"\"\n    config = _get_config_module(fname)\n    model = copy.deepcopy(config.model)\n    return model\n\n\ndef _traversed_config_file():\n    \"\"\"We traversed all potential config files under the `config` file. If you\n    need to print details or debug code, you can use this function.\n\n    If the `backbone.init_cfg` is None (do not use `Pretrained` init way), you\n    need add the folder name in `ignores_folder` (if the config files in this\n    folder all set backbone.init_cfg is None) or add config name in\n    `ignores_file` (if the config file set backbone.init_cfg is None)\n    \"\"\"\n    config_path = _get_config_directory()\n    check_cfg_names = []\n\n    # `base`, `legacy_1.x` and `common` ignored by default.\n    ignores_folder = ['_base_', 'legacy_1.x', 'common']\n    # 'ld' need load teacher model, if want to check 'ld',\n    # please check teacher_config path first.\n    ignores_folder += ['ld']\n    # `selfsup_pretrain` need convert model, if want to check this model,\n    # need to convert the model first.\n    ignores_folder += ['selfsup_pretrain']\n\n    # the `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes',\n    # 'scratch' is None.\n    # the `init_cfg` in ssdlite(`ssdlite_mobilenetv2_scratch_600e_coco.py`)\n    # is None\n    # Please confirm `bockbone.init_cfg` is None first.\n    ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']\n    ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']\n\n    for config_file_name in os.listdir(config_path):\n        if config_file_name not in ignores_folder:\n            config_file = join(config_path, config_file_name)\n            if os.path.isdir(config_file):\n                for config_sub_file in os.listdir(config_file):\n                    if config_sub_file.endswith('py') and \\\n                            config_sub_file not in ignores_file:\n                        name = join(config_file, config_sub_file)\n                        check_cfg_names.append(name)\n    return check_cfg_names\n\n\ndef _check_backbone(config, print_cfg=True):\n    \"\"\"Check out backbone whether successfully load 
pretrained model, by using\n    `backbone.init_cfg`.\n\n    First, using `CheckpointLoader.load_checkpoint` to load the checkpoint\n        without loading models.\n    Then, using `MODELS.build` to build models, and using\n        `model.init_weights()` to initialize the parameters.\n    Finally, assert weights and bias of each layer loaded from pretrained\n        checkpoint are equal to the weights and bias of original checkpoint.\n        For the convenience of comparison, we sum up weights and bias of\n        each loaded layer separately.\n\n    Args:\n        config (str): Config file path.\n        print_cfg (bool): Whether print logger and return the result.\n\n    Returns:\n        results (str or None): If backbone successfully load pretrained\n            checkpoint, return None; else, return config file path.\n    \"\"\"\n    if print_cfg:\n        print('-' * 15 + 'loading ', config)\n    cfg = Config.fromfile(config)\n    init_cfg = None\n    try:\n        init_cfg = cfg.model.backbone.init_cfg\n        init_flag = True\n    except AttributeError:\n        init_flag = False\n    if init_cfg is None or init_cfg.get('type') != 'Pretrained':\n        init_flag = False\n    if init_flag:\n        checkpoint = CheckpointLoader.load_checkpoint(init_cfg.checkpoint)\n        if 'state_dict' in checkpoint:\n            state_dict = checkpoint['state_dict']\n        else:\n            state_dict = checkpoint\n\n        model = MODELS.build(cfg.model)\n        model.init_weights()\n\n        checkpoint_layers = state_dict.keys()\n        for name, value in model.backbone.state_dict().items():\n            if name in checkpoint_layers:\n                assert value.equal(state_dict[name])\n\n        if print_cfg:\n            print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +\n                  '\\n', )\n            return None\n    else:\n        if print_cfg:\n            print(config + '\\n' + '-' * 10 +\n                  'config file do not have init_cfg' + '-' * 10 + '\\n')\n            return config\n\n\n@pytest.mark.parametrize('config', _traversed_config_file())\ndef test_load_pretrained(config):\n    \"\"\"Check out backbone whether successfully load pretrained model by using\n    `backbone.init_cfg`.\n\n    Details please refer to `_check_backbone`\n    \"\"\"\n    _check_backbone(config, print_cfg=False)\n\n\ndef _test_load_pretrained():\n    \"\"\"We traversed all potential config files under the `config` file. If you\n    need to print details or debug code, you can use this function.\n\n    Returns:\n        check_cfg_names (list[str]): Config files that backbone initialized\n        from pretrained checkpoint might be problematic. Need to recheck\n        the config file. The output including the config files that the\n        backbone.init_cfg is None\n    \"\"\"\n    check_cfg_names = _traversed_config_file()\n    need_check_cfg = []\n\n    prog_bar = ProgressBar(len(check_cfg_names))\n    for config in check_cfg_names:\n        init_cfg_name = _check_backbone(config)\n        if init_cfg_name is not None:\n            need_check_cfg.append(init_cfg_name)\n        prog_bar.update()\n    print('These config files need to be checked again')\n    print(need_check_cfg)\n"
  },
  {
    "path": ".dev_scripts/train_benchmark.sh",
    "content": "PARTITION=$1\nWORK_DIR=$2\nCPUS_PER_TASK=${3:-4}\n\necho 'configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_albu_1x_coco configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_albu_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/atss/atss_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $WORK_DIR/atss_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION autoassign_r50-caffe_fpn_1x_coco configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py $WORK_DIR/autoassign_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn-carafe_1x_coco configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn-carafe_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py $WORK_DIR/cascade-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-mask-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py $WORK_DIR/cascade-mask-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py $WORK_DIR/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centernet_r18-dcnv2_8xb16-crop512-140e_coco configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py $WORK_DIR/centernet_r18-dcnv2_8xb16-crop512-140e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centernet-update_r50-caffe_fpn_ms-1x_coco configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py $WORK_DIR/centernet-update_r50-caffe_fpn_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py' &\nGPUS=16  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py 
$WORK_DIR/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cornernet_hourglass104_8xb6-210e-mstest_coco configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py $WORK_DIR/cornernet_hourglass104_8xb6-210e-mstest_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py $WORK_DIR/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_mdpool_1x_coco configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_mdpool_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ddod/ddod_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ddod_r50_fpn_1x_coco configs/ddod/ddod_r50_fpn_1x_coco.py $WORK_DIR/ddod_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/detectors/detectors_htc-r50_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION detectors_htc-r50_1x_coco configs/detectors/detectors_htc-r50_1x_coco.py $WORK_DIR/detectors_htc-r50_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py' &\nGPUS=16  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION deformable-detr_r50_16xb2-50e_coco configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py $WORK_DIR/deformable-detr_r50_16xb2-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/detr/detr_r50_8xb2-150e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION detr_r50_8xb2-150e_coco configs/detr/detr_r50_8xb2-150e_coco.py $WORK_DIR/detr_r50_8xb2-150e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION dh-faster-rcnn_r50_fpn_1x_coco configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/dh-faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION dynamic-rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py $WORK_DIR/dynamic-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 
'configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION atss_r50_fpn_dyhead_1x_coco configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py $WORK_DIR/atss_r50_fpn_dyhead_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_effb3_fpn_8xb4-crop896-1x_coco configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py $WORK_DIR/retinanet_effb3_fpn_8xb4-crop896-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-attn1111_fpn_1x_coco configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50-attn1111_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-caffe-dc5_ms-1x_coco configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py $WORK_DIR/faster-rcnn_r50-caffe-dc5_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py $WORK_DIR/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py' &\nGPUS=4  GPUS_PER_NODE=4  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fovea_r50_fpn_gn-head-align_4xb4-2x_coco configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py $WORK_DIR/fovea_r50_fpn_gn-head-align_4xb4-2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpg_crop640-50e_coco configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py $WORK_DIR/mask-rcnn_r50_fpg_crop640-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/free_anchor/freeanchor_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION freeanchor_r50_fpn_1x_coco configs/free_anchor/freeanchor_r50_fpn_1x_coco.py $WORK_DIR/freeanchor_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $WORK_DIR/fsaf_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 
'configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py $WORK_DIR/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/gfl/gfl_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $WORK_DIR/gfl_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50_fpn_ghm-1x_coco configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py $WORK_DIR/retinanet_r50_fpn_ghm-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_gn-all_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_gn-ws-all_1x_coco configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_gn-ws-all_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION grid-rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py $WORK_DIR/grid-rcnn_r50_fpn_gn-head_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faste-rcnn_r50_fpn_groie_1x_coco configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py $WORK_DIR/faste-rcnn_r50_fpn_groie_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ga-retinanet_r50-caffe_fpn_1x_coco configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py $WORK_DIR/ga-retinanet_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_hrnetv2p-w18-1x_coco configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py $WORK_DIR/faster-rcnn_hrnetv2p-w18-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/htc/htc_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $WORK_DIR/htc_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_instaboost-4x_coco 
configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_instaboost-4x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py' &\nGPUS=2  GPUS_PER_NODE=2  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION lad_r50-paa-r101_fpn_2xb8_coco_1x configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py $WORK_DIR/lad_r50-paa-r101_fpn_2xb8_coco_1x --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ld_r18-gflv1-r101_fpn_1x_coco configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py $WORK_DIR/ld_r18-gflv1-r101_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION libra-faster-rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/libra-faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask2former_r50_8xb2-lsj-50e_coco-panoptic configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py $WORK_DIR/mask2former_r50_8xb2-lsj-50e_coco-panoptic --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py' &\nGPUS=16  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION maskformer_r50_ms-16xb1-75e_coco configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py $WORK_DIR/maskformer_r50_ms-16xb1-75e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ms-rcnn_r50-caffe_fpn_1x_coco configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py $WORK_DIR/ms-rcnn_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py' &\nGPUS=4  GPUS_PER_NODE=4  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py $WORK_DIR/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50_nasfpn_crop640-50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py $WORK_DIR/retinanet_r50_nasfpn_crop640-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/paa/paa_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $WORK_DIR/paa_r50_fpn_1x_coco --cfg-options 
default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_pafpn_1x_coco configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py $WORK_DIR/faster-rcnn_r50_pafpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION panoptic-fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py $WORK_DIR/panoptic-fpn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_pisa_1x_coco configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_pisa_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION point-rend_r50-caffe_fpn_ms-1x_coco configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py $WORK_DIR/point-rend_r50-caffe_fpn_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/pvt/retinanet_pvt-t_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_pvt-t_fpn_1x_coco configs/pvt/retinanet_pvt-t_fpn_1x_coco.py $WORK_DIR/retinanet_pvt-t_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/queryinst/queryinst_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION queryinst_r50_fpn_1x_coco configs/queryinst/queryinst_r50_fpn_1x_coco.py $WORK_DIR/queryinst_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_regnetx-800MF_fpn_1x_coco configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py $WORK_DIR/retinanet_regnetx-800MF_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION reppoints-moment_r50_fpn_1x_coco configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py $WORK_DIR/reppoints-moment_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_res2net-101_fpn_2x_coco configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py $WORK_DIR/faster-rcnn_res2net-101_fpn_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py $WORK_DIR/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py' &\nGPUS=8  
GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50-rsb-pre_fpn_1x_coco configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py $WORK_DIR/retinanet_r50-rsb-pre_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50-caffe_fpn_1x_coco configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py $WORK_DIR/retinanet_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/rpn/rpn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $WORK_DIR/rpn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION sabl-retinanet_r50_fpn_1x_coco configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py $WORK_DIR/sabl-retinanet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/scnet/scnet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $WORK_DIR/scnet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-scratch_fpn_gn-all_6x_coco configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py $WORK_DIR/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/solo/solo_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION solo_r50_fpn_1x_coco configs/solo/solo_r50_fpn_1x_coco.py $WORK_DIR/solo_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/solov2/solov2_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION solov2_r50_fpn_1x_coco configs/solov2/solov2_r50_fpn_1x_coco.py $WORK_DIR/solov2_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION sparse-rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py $WORK_DIR/sparse-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ssd/ssd300_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $WORK_DIR/ssd300_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ssdlite_mobilenetv2-scratch_8xb24-600e_coco configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py $WORK_DIR/ssdlite_mobilenetv2-scratch_8xb24-600e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION 
mask-rcnn_swin-t-p4-w7_fpn_1x_coco configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py $WORK_DIR/mask-rcnn_swin-t-p4-w7_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/tood/tood_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION tood_r50_fpn_1x_coco configs/tood/tood_r50_fpn_1x_coco.py $WORK_DIR/tood_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/tridentnet/tridentnet_r50-caffe_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION tridentnet_r50-caffe_1x_coco configs/tridentnet/tridentnet_r50-caffe_1x_coco.py $WORK_DIR/tridentnet_r50-caffe_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $WORK_DIR/vfnet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/yolact/yolact_r50_8xb8-55e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolact_r50_8xb8-55e_coco configs/yolact/yolact_r50_8xb8-55e_coco.py $WORK_DIR/yolact_r50_8xb8-55e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/yolo/yolov3_d53_8xb8-320-273e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolov3_d53_8xb8-320-273e_coco configs/yolo/yolov3_d53_8xb8-320-273e_coco.py $WORK_DIR/yolov3_d53_8xb8-320-273e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/yolof/yolof_r50-c5_8xb8-1x_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolof_r50-c5_8xb8-1x_coco configs/yolof/yolof_r50-c5_8xb8-1x_coco.py $WORK_DIR/yolof_r50-c5_8xb8-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\necho 'configs/yolox/yolox_tiny_8xb8-300e_coco.py' &\nGPUS=8  GPUS_PER_NODE=8  CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolox_tiny_8xb8-300e_coco configs/yolox/yolox_tiny_8xb8-300e_coco.py $WORK_DIR/yolox_tiny_8xb8-300e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &\n"
  },
  {
    "path": ".github/CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and maintainers pledge to making participation in our project and\nour community a harassment-free experience for everyone, regardless of age, body\nsize, disability, ethnicity, sex characteristics, gender identity and expression,\nlevel of experience, education, socio-economic status, nationality, personal\nappearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment\ninclude:\n\n- Using welcoming and inclusive language\n- Being respectful of differing viewpoints and experiences\n- Gracefully accepting constructive criticism\n- Focusing on what is best for the community\n- Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n- The use of sexualized language or imagery and unwelcome sexual attention or\n  advances\n- Trolling, insulting/derogatory comments, and personal or political attacks\n- Public or private harassment\n- Publishing others' private information, such as a physical or electronic\n  address, without explicit permission\n- Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable\nbehavior and are expected to take appropriate and fair corrective action in\nresponse to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or\nreject comments, commits, code, wiki edits, issues, and other contributions\nthat are not aligned to this Code of Conduct, or to ban temporarily or\npermanently any contributor for other behaviors that they deem inappropriate,\nthreatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces\nwhen an individual is representing the project or its community. Examples of\nrepresenting a project or community include using an official project e-mail\naddress, posting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event. Representation of a project may be\nfurther defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported by contacting the project team at chenkaidev@gmail.com. All\ncomplaints will be reviewed and investigated and will result in a response that\nis deemed necessary and appropriate to the circumstances. The project team is\nobligated to maintain confidentiality with regard to the reporter of an incident.\nFurther details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good\nfaith may face temporary or permanent repercussions as determined by other\nmembers of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,\navailable at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html\n\nFor answers to common questions about this code of conduct, see\nhttps://www.contributor-covenant.org/faq\n\n[homepage]: https://www.contributor-covenant.org\n"
  },
  {
    "path": ".github/CONTRIBUTING.md",
    "content": "We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](https://github.com/open-mmlab/mmcv/blob/master/CONTRIBUTING.md) in MMCV for more details about the contributing guideline.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\n\ncontact_links:\n  - name: Common Issues\n    url: https://mmdetection.readthedocs.io/en/latest/faq.html\n    about: Check if your issue already has solutions\n  - name: MMDetection Documentation\n    url: https://mmdetection.readthedocs.io/en/latest/\n    about: Check if your question is answered in docs\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/error-report.md",
    "content": "---\nname: Error report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: ''\n---\n\nThanks for your error report and we appreciate it a lot.\n\n**Checklist**\n\n1. I have searched related issues but cannot get the expected help.\n2. I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.\n3. The bug has not been fixed in the latest version.\n\n**Describe the bug**\nA clear and concise description of what the bug is.\n\n**Reproduction**\n\n1. What command or script did you run?\n\n```none\nA placeholder for the command.\n```\n\n2. Did you make any modifications on the code or config? Did you understand what you have modified?\n3. What dataset did you use?\n\n**Environment**\n\n1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.\n2. You may add addition that may be helpful for locating the problem, such as\n   - How you installed PyTorch \\[e.g., pip, conda, source\\]\n   - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\n\n**Error traceback**\nIf applicable, paste the error trackback here.\n\n```none\nA placeholder for trackback.\n```\n\n**Bug fix**\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: ''\nassignees: ''\n---\n\n**Describe the feature**\n\n**Motivation**\nA clear and concise description of the motivation of the feature.\nEx1. It is inconvenient when \\[....\\].\nEx2. There is a recent paper \\[....\\], which is very helpful for \\[....\\].\n\n**Related resources**\nIf there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.\n\n**Additional context**\nAdd any other context or screenshots about the feature request here.\nIf you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/general_questions.md",
    "content": "---\nname: General questions\nabout: Ask general questions to get help\ntitle: ''\nlabels: ''\nassignees: ''\n---\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/reimplementation_questions.md",
    "content": "---\nname: Reimplementation Questions\nabout: Ask about questions during model reimplementation\ntitle: ''\nlabels: reimplementation\nassignees: ''\n---\n\n**Notice**\n\nThere are several common situations in the reimplementation issues as below\n\n1. Reimplement a model in the model zoo using the provided configs\n2. Reimplement a model in the model zoo on other dataset (e.g., custom datasets)\n3. Reimplement a custom model but all the components are implemented in MMDetection\n4. Reimplement a custom model with new modules implemented by yourself\n\nThere are several things to do for different cases as below.\n\n- For case 1 & 3, please follow the steps in the following sections thus we could help to quick identify the issue.\n- For case 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code and the users should be responsible to the code they write.\n- One suggestion for case 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections and try as clear as possible so that we can better help you.\n\n**Checklist**\n\n1. I have searched related issues but cannot get the expected help.\n2. The issue has not been fixed in the latest version.\n\n**Describe the issue**\n\nA clear and concise description of what the problem you meet and what have you done.\n\n**Reproduction**\n\n1. What command or script did you run?\n\n```none\nA placeholder for the command.\n```\n\n2. What config dir you run?\n\n```none\nA placeholder for the config.\n```\n\n3. Did you make any modifications on the code or config? Did you understand what you have modified?\n4. What dataset did you use?\n\n**Environment**\n\n1. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.\n2. You may add addition that may be helpful for locating the problem, such as\n   1. How you installed PyTorch \\[e.g., pip, conda, source\\]\n   2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\n\n**Results**\n\nIf applicable, paste the related results here, e.g., what you expect and what you get.\n\n```none\nA placeholder for results comparison\n```\n\n**Issue fix**\n\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "Thanks for your contribution and we appreciate it a lot. The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.\n\n## Motivation\n\nPlease describe the motivation of this PR and the goal you want to achieve through this PR.\n\n## Modification\n\nPlease briefly describe what modification is made in this PR.\n\n## BC-breaking (Optional)\n\nDoes the modification introduce changes that break the backward-compatibility of the downstream repos?\nIf so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.\n\n## Use cases (Optional)\n\nIf this PR introduces a new feature, it is better to list some use cases here, and update the documentation.\n\n## Checklist\n\n1. Pre-commit or other linting tools are used to fix the potential lint issues.\n2. The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness.\n3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMCls.\n4. The documentation has been modified accordingly, like docstring or example tutorials.\n"
  },
  {
    "path": ".github/workflows/deploy.yml",
    "content": "name: deploy\n\non: push\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build-n-publish:\n    runs-on: ubuntu-latest\n    if: startsWith(github.event.ref, 'refs/tags')\n    steps:\n      - uses: actions/checkout@v2\n      - name: Set up Python 3.7\n        uses: actions/setup-python@v2\n        with:\n          python-version: 3.7\n      - name: Install torch\n        run: pip install torch\n      - name: Install wheel\n        run: pip install wheel\n      - name: Build MMDetection\n        run: python setup.py sdist bdist_wheel\n      - name: Publish distribution to PyPI\n        run: |\n          pip install twine\n          twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/en/_build/\ndocs/zh_cn/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\ndata/\ndata\n.vscode\n.idea\n.DS_Store\n\n# custom\n*.pkl\n*.pkl.json\n*.log.json\ndocs/modelzoo_statistics.md\nmmdet/.mim\nwork_dirs/\n\n# Pytorch\n*.pth\n*.py~\n*.sh~\n"
  },
  {
    "path": ".owners.yml",
    "content": "assign:\n  strategy:\n    # random\n    daily-shift-based\n  scedule:\n    '*/1 * * * *'\n  assignees:\n    - Czm369\n    - hhaAndroid\n    - jbwang1997\n    - RangiLyu\n    - BIGWangYuDong\n    - chhluo\n    - ZwwWayne\n"
  },
  {
    "path": ".pre-commit-config-zh-cn.yaml",
    "content": "exclude: ^tests/data/\nrepos:\n  - repo: https://gitee.com/openmmlab/mirrors-flake8\n    rev: 5.0.4\n    hooks:\n      - id: flake8\n  - repo: https://gitee.com/openmmlab/mirrors-isort\n    rev: 5.11.5\n    hooks:\n      - id: isort\n  - repo: https://gitee.com/openmmlab/mirrors-yapf\n    rev: v0.32.0\n    hooks:\n      - id: yapf\n  - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks\n    rev: v4.3.0\n    hooks:\n      - id: trailing-whitespace\n      - id: check-yaml\n      - id: end-of-file-fixer\n      - id: requirements-txt-fixer\n      - id: double-quote-string-fixer\n      - id: check-merge-conflict\n      - id: fix-encoding-pragma\n        args: [\"--remove\"]\n      - id: mixed-line-ending\n        args: [\"--fix=lf\"]\n  - repo: https://gitee.com/openmmlab/mirrors-mdformat\n    rev: 0.7.9\n    hooks:\n      - id: mdformat\n        args: [\"--number\"]\n        additional_dependencies:\n          - mdformat-openmmlab\n          - mdformat_frontmatter\n          - linkify-it-py\n  - repo: https://gitee.com/openmmlab/mirrors-codespell\n    rev: v2.2.1\n    hooks:\n      - id: codespell\n  - repo: https://gitee.com/openmmlab/mirrors-docformatter\n    rev: v1.3.1\n    hooks:\n      - id: docformatter\n        args: [\"--in-place\", \"--wrap-descriptions\", \"79\"]\n  - repo: https://gitee.com/openmmlab/mirrors-pyupgrade\n    rev: v3.0.0\n    hooks:\n      - id: pyupgrade\n        args: [\"--py36-plus\"]\n  - repo: https://gitee.com/open-mmlab/pre-commit-hooks\n    rev: v0.2.0\n    hooks:\n      - id: check-algo-readme\n      - id: check-copyright\n        args: [\"mmdet\"]\n#  - repo: https://gitee.com/openmmlab/mirrors-mypy\n#    rev: v0.812\n#    hooks:\n#      - id: mypy\n#        exclude: \"docs\"\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n  - repo: https://github.com/PyCQA/flake8\n    rev: 5.0.4\n    hooks:\n      - id: flake8\n  - repo: https://github.com/PyCQA/isort\n    rev: 5.11.5\n    hooks:\n      - id: isort\n  - repo: https://github.com/pre-commit/mirrors-yapf\n    rev: v0.32.0\n    hooks:\n      - id: yapf\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.3.0\n    hooks:\n      - id: trailing-whitespace\n      - id: check-yaml\n      - id: end-of-file-fixer\n      - id: requirements-txt-fixer\n      - id: double-quote-string-fixer\n      - id: check-merge-conflict\n      - id: fix-encoding-pragma\n        args: [\"--remove\"]\n      - id: mixed-line-ending\n        args: [\"--fix=lf\"]\n  - repo: https://github.com/codespell-project/codespell\n    rev: v2.2.1\n    hooks:\n      - id: codespell\n  - repo: https://github.com/executablebooks/mdformat\n    rev: 0.7.9\n    hooks:\n      - id: mdformat\n        args: [\"--number\"]\n        additional_dependencies:\n          - mdformat-openmmlab\n          - mdformat_frontmatter\n          - linkify-it-py\n  - repo: https://github.com/myint/docformatter\n    rev: v1.3.1\n    hooks:\n      - id: docformatter\n        args: [\"--in-place\", \"--wrap-descriptions\", \"79\"]\n  - repo: https://github.com/open-mmlab/pre-commit-hooks\n    rev: v0.2.0  # Use the ref you want to point at\n    hooks:\n      - id: check-algo-readme\n      - id: check-copyright\n        args: [\"mmdet\"]  # replace the dir_to_check with your expected directory to check\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "version: 2\n\nformats: all\n\npython:\n  version: 3.7\n  install:\n    - requirements: requirements/docs.txt\n    - requirements: requirements/readthedocs.txt\n"
  },
  {
    "path": "CITATION.cff",
    "content": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n  - name: \"MMDetection Contributors\"\ntitle: \"OpenMMLab Detection Toolbox and Benchmark\"\ndate-released: 2018-08-22\nurl: \"https://github.com/open-mmlab/mmdetection\"\nlicense: Apache-2.0\n"
  },
  {
    "path": "LICENSE",
    "content": "## creative commons\n\n# Attribution-NonCommercial 4.0 International\n\nCreative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.\n\n### Using Creative Commons Public Licenses\n\nCreative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.\n\n* __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).\n\n* __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).\n\n## Creative Commons Attribution-NonCommercial 4.0 International Public License\n\nBy exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License (\"Public License\"). 
To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.\n\n### Section 1 – Definitions.\n\na. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.\n\nb. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.\n\nc. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.\n\nd. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.\n\ne. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.\n\nf. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License.\n\ng. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.\n\nh. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License.\n\ni. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.\n\nj. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.\n\nk. 
__Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.\n\nl. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.\n\n### Section 2 – Scope.\n\na. ___License grant.___\n\n   1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:\n\n       A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and\n\n       B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.\n\n   2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.\n\n   3. __Term.__ The term of this Public License is specified in Section 6(a).\n\n   4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.\n\n   5. __Downstream recipients.__\n\n        A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.\n\n        B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.\n\n   6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).\n\nb. ___Other rights.___\n\n   1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.\n\n   2. Patent and trademark rights are not licensed under this Public License.\n\n   3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. 
In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.\n\n### Section 3 – License Conditions.\n\nYour exercise of the Licensed Rights is expressly made subject to the following conditions.\n\na. ___Attribution.___\n\n   1. If You Share the Licensed Material (including in modified form), You must:\n\n       A. retain the following if it is supplied by the Licensor with the Licensed Material:\n\n         i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);\n\n         ii. a copyright notice;\n\n         iii. a notice that refers to this Public License;\n\n         iv. a notice that refers to the disclaimer of warranties;\n\n         v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;\n\n       B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and\n\n       C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.\n\n   2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.\n\n   3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.\n\n   4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.\n\n### Section 4 – Sui Generis Database Rights.\n\nWhere the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:\n\na. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;\n\nb. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and\n\nc. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.\n\nFor the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.\n\n### Section 5 – Disclaimer of Warranties and Limitation of Liability.\n\na. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. 
Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__\n\nb. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__\n\nc. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.\n\n### Section 6 – Term and Termination.\n\na. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.\n\nb. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:\n\n   1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or\n\n   2. upon express reinstatement by the Licensor.\n\n   For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.\n\nc. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.\n\nd. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.\n\n### Section 7 – Other Terms and Conditions.\n\na. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.\n\nb. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.\n\n### Section 8 – Interpretation.\n\na. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.\n\nb. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.\n\nc. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.\n\nd. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.\n\n> Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.\n>\n> Creative Commons may be contacted at creativecommons.org\n\nCopyright (c) 2022 MCG-NKU\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include requirements/*.txt\ninclude mmdet/VERSION\ninclude mmdet/.mim/model-index.yml\ninclude mmdet/.mim/demo/*/*\nrecursive-include mmdet/.mim/configs *.py *.yml\nrecursive-include mmdet/.mim/tools *.sh *.py\n"
  },
  {
    "path": "README.md",
    "content": "# <p align=center>  🌟 `CrossKD: Cross-Head Knowledge Distillation for Dense Object Detection` 🌟 </p>\n\n![Python 3.8](https://img.shields.io/badge/python-3.8-g) ![pytorch 1.12.1](https://img.shields.io/badge/pytorch-1.12.0-blue.svg)\n\nThis repository contains the official implementation of the following paper:\n\n> **CrossKD: Cross-Head Knowledge Distillation for Dense Object Detection**<br>\n> [Jiabao Wang](https://scholar.google.co.uk/citations?hl=en&user=S9ErhhEAAAAJ)<sup>\\*</sup>, [Yuming Chen](https://github.com/FishAndWasabi/)<sup>\\*</sup>, [Zhaohui Zheng](https://scholar.google.co.uk/citations?hl=en&user=0X71NDYAAAAJ)，[Xiang Li](http://implus.github.io/), [Ming-Ming Cheng](https://mmcheng.net/cmm), [Qibin Hou](https://houqb.github.io/)<sup>\\*</sup>  <br>\n> (\\* denotes equal contribution) <br>\n> VCIP, School of Computer Science, Nankai University <br>\n\n[[Arxiv Paper](https://arxiv.org/abs/2306.11369)]\n\n## Introduction\n\nKnowledge Distillation (KD) has been validated as an effective model compression technique for learning compact object detectors. Existing state-of-the-art KD methods for object detection are mostly based on feature imitation, which is generally observed to be better than prediction mimicking. In this paper, we show that the inconsistency of the optimization objectives between the ground-truth signals and distillation targets is the key reason for the inefficiency of prediction mimicking. To alleviate this issue, we present a simple yet effective distillation scheme, termed CrossKD, which delivers the intermediate features of the student's detection head to the teacher's detection head. The resulting cross-head predictions are then forced to mimic the teacher's predictions. Such a distillation manner relieves the student's head from receiving contradictory supervision signals from the ground-truth annotations and the teacher's predictions, greatly improving the student's detection performance. On MS COCO, with only prediction mimicking losses applied, our CrossKD boosts the average precision of GFL ResNet-50 with 1x training schedule from 40.2 to 43.7, outperforming all existing KD methods for object detection.\n\n![struture](assets/structure.png)\n\n## Get Started\n\n### 1. Prerequisites\n\n**Dependencies**\n\n- Ubuntu >= 20.04\n- CUDA >= 11.3\n- pytorch==1.12.1\n- torchvision=0.13.1\n- mmcv==2.0.0rc4\n- mmengine==0.7.3\n\nOur implementation based on MMDetection==3.0.0rc6. 
For more information about installation, please see the [official instructions](https://mmdetection.readthedocs.io/en/3.x/).\n\n**Step 0.** Create Conda Environment\n\n```shell\nconda create --name openmmlab python=3.8 -y\nconda activate openmmlab\n```\n\n**Step 1.** Install [PyTorch](https://pytorch.org)\n\n```shell\nconda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch\n```\n\n**Step 2.** Install [MMEngine](https://github.com/open-mmlab/mmengine) and [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim).\n\n```shell\npip install -U openmim\nmim install \"mmengine==0.7.3\"\nmim install \"mmcv==2.0.0rc4\"\n```\n\n**Step 3.** Install [CrossKD](https://github.com/jbwang1997/CrossKD.git).\n\n```shell\ngit clone https://github.com/jbwang1997/CrossKD\ncd CrossKD\npip install -v -e .\n# \"-v\" means verbose, or more output\n# \"-e\" means installing a project in editable mode,\n# thus any local modifications made to the code will take effect without reinstallation.\n```\n\n**Step 4.** Prepare the dataset following the [official instructions](https://mmdetection.readthedocs.io/en/3.x/user_guides/dataset_prepare.html).\n\n\n\n### 2. Training\n\n**Single GPU**\n\n```shell\npython tools/train.py configs/crosskd/${CONFIG_FILE} [optional arguments]\n```\n\n**Multi GPU**\n\n```shell\nCUDA_VISIBLE_DEVICES=x,x,x,x bash tools/dist_train.sh \\\n    configs/crosskd/${CONFIG_FILE} ${GPU_NUM} [optional arguments]\n```\n\n### 3. Evaluation\n\n```shell\npython tools/test.py configs/crosskd/${CONFIG_FILE} ${CHECKPOINT_FILE}\n```\n\n## Results\n\n### 1. GFL\n\n| **Method**         | schedule | AP          | Config                                                                           | Download                                                                                                                                             |\n|:------------------:|:--------:|:-----------:|:--------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------------:|\n| **GFL-Res101 (T)** | 2x+ms    | 44.9        |  [config](<configs/gfl/gfl_r101_fpn_ms-2x_coco.py>)                              |  [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) |\n| **GFL-Res50 (S)**  | 1x       | 40.2        |  [config](<configs/gfl/gfl_r50_fpn_1x_coco.py>)                                  |  [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth)                   |\n| **CrossKD**        | 1x       | 43.7 (+3.5) |  [config](<configs/crosskd/crosskd_r50_gflv1_r101-2x-ms_fpn_1x_coco.py>)         |  [model](https://drive.google.com/file/d/1S7fyDkFSAauJry0ZGS-ZW-P3CJb7RlsO/view?usp=drive_link)                                                      |\n| **CrossKD+PKD**    | 1x       | 43.9 (+3.7) |  [config](<configs/crosskd+pkd/crosskd+pkd_r50_gflv1_r101-2x-ms_fpn_1x_coco.py>) |  [model](https://drive.google.com/file/d/1LJZ27al2omdXb3cUty-RX37pMLp8L-4B/view?usp=drive_link)                                                      |\n\n\n\n\n### 2. RetinaNet\n\n| **Method**               | schedule | AP          | Config                                                                         | Download                                                                                                                                        |\n|:------------------------:|:--------:|:-----------:|:------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------:|\n| **RetinaNet-Res101 (T)** | 2x       | 38.9        |  [config](<configs/retinanet/retinanet_r101_fpn_2x_coco.py>)                   |  [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth) |\n| **RetinaNet-Res50 (S)**  | 2x       | 37.4        |  [config](<configs/retinanet/retinanet_r50_fpn_2x_coco.py>)                    |  [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth)   |\n| **CrossKD**              | 2x       | 39.7 (+2.3) |  [config](<configs/crosskd/crosskd_r50_retinanet_r101_fpn_2x_coco.py>)         |  [model](https://drive.google.com/file/d/1fjwtuoKd4a_b5CHf6X0tKDmSNlwzYfWb/view?usp=drive_link)                                                 |\n| **CrossKD+PKD**          | 2x       | 39.8 (+2.4) |  [config](<configs/crosskd+pkd/crosskd+pkd_r50_retinanet_r101_fpn_2x_coco.py>) |  [model](https://drive.google.com/file/d/1Ha9r5DrzaZ_9tz8x9PVxOkGaKAApIBGd/view?usp=drive_link)                                                 |\n\n\n### 3. FCOS\n\n| **Method**          | schedule | AP          | Config                                                                                           | Download                                                                                                                                                                            |\n|:-------------------:|:--------:|:-----------:|:------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|\n| **FCOS-Res101 (T)** | 2x+ms    | 40.8        |  [config](<configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py>)                      |  [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) |\n| **FCOS-Res50 (S)**  | 2x+ms    | 38.5        |  [config](<configs/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py>)                       |  [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth)   |\n| **CrossKD**         | 2x+ms    | 41.1 (+2.6) |  [config](<configs/crosskd/crosskd_r50_fcos_r101-2x-ms_caffe_fpn_gn-head_2x_ms_coco.py>)         |  [model](https://drive.google.com/file/d/1ll5vOGFMEfOsNCkgbPuqh0uMNFnfICbE/view?usp=drive_link)                                                                                     |\n| **CrossKD+PKD**     | 2x+ms    | 41.3 (+2.8) |  [config](<configs/crosskd+pkd/crosskd+pkd_r50_fcos_r101-2x-ms_caffe_fpn_gn-head_2x_ms_coco.py>) |  
[model](https://drive.google.com/file/d/1r-UzxAOYOfPJFIV5e7Rd3P3uC9gXP09v/view?usp=drive_link)                                                                                     |\n\n\n### 4. ATSS\n\n| **Method**          | schedule | AP         | Config                                                                    | Download                                                                                                                       |\n|:-------------------:|:--------:|:----------:|:-------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------:|\n| **ATSS-Res101 (T)** | 1x       | 41.5       |  [config](<configs/atss/atss_r101_fpn_1x_coco.py>)                        |  [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth)    |\n| **ATSS-Res50 (S)**  | 1x       | 39.4       |  [config](<configs/atss/atss_r50_fpn_1x_coco.py>)                         |  [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) |\n| **CrossKD**         | 1x       | 41.8(+2.4) |  [config](<configs/crosskd/crosskd_r50_atss_r101_fpn_1x_coco.py>)         |  [model](https://drive.google.com/file/d/1qyxOMaxQrwJ20tEgIwU8pi31O8A1hsEG/view?usp=drive_link)                                |\n| **CrossKD+PKD**     | 1x       | 41.8(+2.4) |  [config](<configs/crosskd+pkd/crosskd+pkd_r50_atss_r101_fpn_1x_coco.py>) |  [model](https://drive.google.com/file/d/1LkuKau1Na843ZPSNz77DqV8v8111b2_y/view?usp=drive_link)                                |\n\n\n\n## Heterogeneous Results\n\n### 1. Swin-Tiny\n\n| **Method**          | schedule | AP         | Config                                                                    | Download                                                                                                                       |\n|:-------------------:|:--------:|:----------:|:-------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------:|\n| **RetinaNet-SwinT (T)** | 1x       | 37.3       |  [config](<configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py>)                        |  [model](https://drive.google.com/file/d/1W2KGR77XfQ5SRomgIyxCjqUNGJichcgh/view?usp=drive_link)    |\n| **RetinaNet-Res50 (S)**  | 1x       | 36.5       |  [config](<configs/atss/atss_r50_fpn_1x_coco.py>)                         |  [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) |\n| **CrossKD**     | 1x       | 38.0 (+1.5) |  [config](<configs/crosskd+pkd/crosskd+pkd_r50_atss_r101_fpn_1x_coco.py>) |  [model](https://drive.google.com/file/d/17rbkcXgqKfVUweRgzK7NtqFcLC-PohKX/view?usp=drive_link)                                |\n\n\n\n## Citation\n\nIf you find our repo useful for your research, please cite us:\n\n```\n@misc{wang2023crosskd,\n      title={CrossKD: Cross-Head Knowledge Distillation for Dense Object Detection}, \n      author={Jiabao Wang and Yuming Chen and Zhaohui Zheng and Xiang Li and \n              Ming-Ming Cheng and Qibin Hou},\n      year={2023},\n      eprint={2306.11369},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV}\n}\n```\n\nThis project is based on the open source codebase 
[MMDetection](https://github.com/open-mmlab/mmdetection).\n\n```\n@article{mmdetection,\n  title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},\n  author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and\n             Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and\n             Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and\n             Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and\n             Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong\n             and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},\n  journal = {arXiv preprint arXiv:1906.07155},\n  year    = {2019}\n}\n```\n\n## License\n\nThis project is licensed under the [Creative Commons Attribution-NonCommercial 4.0 International](https://creativecommons.org/licenses/by-nc/4.0/) license for non-commercial use only. Any commercial use requires formal permission in advance.\n\n## Contact\n\nFor technical questions, please contact `jbwang@mail.nankai.edu.cn` and `chenyuming@mail.nankai.edu.cn`.\n\n## Acknowledgement\n\nThis repo is modified from the open-source object detection codebase [MMDetection](https://github.com/open-mmlab/mmdetection).\n"
  },
  {
    "path": "configs/_base_/datasets/cityscapes_detection.py",
    "content": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomResize',\n        scale=[(2048, 800), (2048, 1024)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type='RepeatDataset',\n        times=8,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='annotations/instancesonly_filtered_gtFine_train.json',\n            data_prefix=dict(img='leftImg8bit/train/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=train_pipeline)))\n\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instancesonly_filtered_gtFine_val.json',\n        data_prefix=dict(img='leftImg8bit/val/'),\n        test_mode=True,\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=test_pipeline))\n\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',\n    metric='bbox')\n\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/cityscapes_instance.py",
    "content": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='RandomResize',\n        scale=[(2048, 800), (2048, 1024)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type='RepeatDataset',\n        times=8,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='annotations/instancesonly_filtered_gtFine_train.json',\n            data_prefix=dict(img='leftImg8bit/train/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=train_pipeline)))\n\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instancesonly_filtered_gtFine_val.json',\n        data_prefix=dict(img='leftImg8bit/val/'),\n        test_mode=True,\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=test_pipeline))\n\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n    dict(\n        type='CocoMetric',\n        ann_file=data_root +\n        'annotations/instancesonly_filtered_gtFine_val.json',\n        metric=['bbox', 'segm']),\n    dict(\n        type='CityScapesMetric',\n        ann_file=data_root +\n        'annotations/instancesonly_filtered_gtFine_val.json',\n        seg_prefix=data_root + '/gtFine/val',\n        outfile_prefix='./work_dirs/cityscapes_metric/instance')\n]\n\ntest_evaluator = val_evaluator\n\n# inference on test dataset and\n# format the output results for submission.\n# test_dataloader = dict(\n#     batch_size=1,\n#     num_workers=2,\n#     persistent_workers=True,\n#     drop_last=False,\n#     sampler=dict(type='DefaultSampler', shuffle=False),\n#     dataset=dict(\n#         type=dataset_type,\n#         data_root=data_root,\n#         ann_file='annotations/instancesonly_filtered_gtFine_test.json',\n#         data_prefix=dict(img='leftImg8bit/test/'),\n#         test_mode=True,\n#         filter_cfg=dict(filter_empty_gt=True, min_size=32),\n#         pipeline=test_pipeline))\n# test_evaluator = dict(\n#         type='CityScapesMetric',\n#         format_only=True,\n#         outfile_prefix='./work_dirs/cityscapes_metric/test')\n"
  },
  {
    "path": "configs/_base_/datasets/coco_detection.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_train2017.json',\n        data_prefix=dict(img='train2017/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_val2017.json',\n        data_prefix=dict(img='val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/instances_val2017.json',\n    metric='bbox',\n    format_only=False)\ntest_evaluator = val_evaluator\n\n# inference on test dataset and\n# format the output results for submission.\n# test_dataloader = dict(\n#     batch_size=1,\n#     num_workers=2,\n#     persistent_workers=True,\n#     drop_last=False,\n#     sampler=dict(type='DefaultSampler', shuffle=False),\n#     dataset=dict(\n#         type=dataset_type,\n#         data_root=data_root,\n#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',\n#         data_prefix=dict(img='test2017/'),\n#         test_mode=True,\n#         pipeline=test_pipeline))\n# test_evaluator = dict(\n#     type='CocoMetric',\n#     metric='bbox',\n#     format_only=True,\n#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',\n#     outfile_prefix='./work_dirs/coco_detection/test')\n"
  },
  {
    "path": "configs/_base_/datasets/coco_instance.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_train2017.json',\n        data_prefix=dict(img='train2017/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_val2017.json',\n        data_prefix=dict(img='val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/instances_val2017.json',\n    metric=['bbox', 'segm'],\n    format_only=False)\ntest_evaluator = val_evaluator\n\n# inference on test dataset and\n# format the output results for submission.\n# test_dataloader = dict(\n#     batch_size=1,\n#     num_workers=2,\n#     persistent_workers=True,\n#     drop_last=False,\n#     sampler=dict(type='DefaultSampler', shuffle=False),\n#     dataset=dict(\n#         type=dataset_type,\n#         data_root=data_root,\n#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',\n#         data_prefix=dict(img='test2017/'),\n#         test_mode=True,\n#         pipeline=test_pipeline))\n# test_evaluator = dict(\n#     type='CocoMetric',\n#     metric=['bbox', 'segm'],\n#     format_only=True,\n#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',\n#     outfile_prefix='./work_dirs/coco_instance/test')\n"
  },
  {
    "path": "configs/_base_/datasets/coco_instance_semantic.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(\n        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(\n        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_train2017.json',\n        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\n\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_val2017.json',\n        data_prefix=dict(img='val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\n\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/instances_val2017.json',\n    metric=['bbox', 'segm'],\n    format_only=False)\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/coco_panoptic.py",
    "content": "# dataset settings\ndataset_type = 'CocoPanopticDataset'\ndata_root = 'data/coco/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/panoptic_train2017.json',\n        data_prefix=dict(\n            img='train2017/', seg='annotations/panoptic_train2017/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/panoptic_val2017.json',\n        data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoPanopticMetric',\n    ann_file=data_root + 'annotations/panoptic_val2017.json',\n    seg_prefix=data_root + 'annotations/panoptic_val2017/',\n    file_client_args=file_client_args,\n)\ntest_evaluator = val_evaluator\n\n# inference on test dataset and\n# format the output results for submission.\n# test_dataloader = dict(\n#     batch_size=1,\n#     num_workers=1,\n#     persistent_workers=True,\n#     drop_last=False,\n#     sampler=dict(type='DefaultSampler', shuffle=False),\n#     dataset=dict(\n#         type=dataset_type,\n#         data_root=data_root,\n#         ann_file='annotations/panoptic_image_info_test-dev2017.json',\n#         data_prefix=dict(img='test2017/'),\n#         test_mode=True,\n#         pipeline=test_pipeline))\n# test_evaluator = dict(\n#     type='CocoPanopticMetric',\n#     format_only=True,\n#     ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',\n#     outfile_prefix='./work_dirs/coco_panoptic/test')\n"
  },
  {
    "path": "configs/_base_/datasets/deepfashion.py",
    "content": "# dataset settings\ndataset_type = 'DeepFashionDataset'\ndata_root = 'data/DeepFashion/In-shop/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(type='Resize', scale=(750, 1101), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(750, 1101), keep_ratio=True),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type='RepeatDataset',\n        times=2,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',\n            data_prefix=dict(img='Img/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=train_pipeline)))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',\n        data_prefix=dict(img='Img/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',\n        data_prefix=dict(img='Img/'),\n        test_mode=True,\n        pipeline=test_pipeline))\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root +\n    'Anno/segmentation/DeepFashion_segmentation_query.json',\n    metric=['bbox', 'segm'],\n    format_only=False)\ntest_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root +\n    'Anno/segmentation/DeepFashion_segmentation_gallery.json',\n    metric=['bbox', 'segm'],\n    format_only=False)\n"
  },
  {
    "path": "configs/_base_/datasets/lvis_v0.5_instance.py",
    "content": "# dataset settings\ndataset_type = 'LVISV05Dataset'\ndata_root = 'data/lvis_v0.5/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='RandomChoiceResize',\n        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),\n                (1333, 768), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type='ClassBalancedDataset',\n        oversample_thr=1e-3,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='annotations/lvis_v0.5_train.json',\n            data_prefix=dict(img='train2017/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=train_pipeline)))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/lvis_v0.5_val.json',\n        data_prefix=dict(img='val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='LVISMetric',\n    ann_file=data_root + 'annotations/lvis_v0.5_val.json',\n    metric=['bbox', 'segm'])\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/lvis_v1_instance.py",
    "content": "# dataset settings\n_base_ = 'lvis_v0.5_instance.py'\ndataset_type = 'LVISV1Dataset'\ndata_root = 'data/lvis_v1/'\n\ntrain_dataloader = dict(\n    dataset=dict(\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='annotations/lvis_v1_train.json',\n            data_prefix=dict(img=''))))\nval_dataloader = dict(\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/lvis_v1_val.json',\n        data_prefix=dict(img='')))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(ann_file=data_root + 'annotations/lvis_v1_val.json')\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/objects365v1_detection.py",
    "content": "# dataset settings\ndataset_type = 'Objects365V1Dataset'\ndata_root = 'data/Objects365/Obj365_v1/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/objects365_train.json',\n        data_prefix=dict(img='train/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/objects365_val.json',\n        data_prefix=dict(img='val/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/objects365_val.json',\n    metric='bbox',\n    sort_categories=True,\n    format_only=False)\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/objects365v2_detection.py",
    "content": "# dataset settings\ndataset_type = 'Objects365V2Dataset'\ndata_root = 'data/Objects365/Obj365_v2/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/zhiyuan_objv2_train.json',\n        data_prefix=dict(img='train/'),\n        filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/zhiyuan_objv2_val.json',\n        data_prefix=dict(img='val/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',\n    metric='bbox',\n    format_only=False)\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/openimages_detection.py",
    "content": "# dataset settings\ndataset_type = 'OpenImagesDataset'\ndata_root = 'data/OpenImages/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\n\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', scale=(1024, 800), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1024, 800), keep_ratio=True),\n    # avoid bboxes being resized\n    dict(type='LoadAnnotations', with_bbox=True),\n    # TODO: find a better way to collect image_level_labels\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor', 'instances', 'image_level_labels'))\n]\n\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=0,  # workers_per_gpu > 0 may occur out of memory\n    persistent_workers=False,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/oidv6-train-annotations-bbox.csv',\n        data_prefix=dict(img='OpenImages/train/'),\n        label_file='annotations/class-descriptions-boxable.csv',\n        hierarchy_file='annotations/bbox_labels_600_hierarchy.json',\n        meta_file='annotations/train-image-metas.pkl',\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=0,\n    persistent_workers=False,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/validation-annotations-bbox.csv',\n        data_prefix=dict(img='OpenImages/validation/'),\n        label_file='annotations/class-descriptions-boxable.csv',\n        hierarchy_file='annotations/bbox_labels_600_hierarchy.json',\n        meta_file='annotations/validation-image-metas.pkl',\n        image_level_ann_file='annotations/validation-'\n        'annotations-human-imagelabels-boxable.csv',\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='OpenImagesMetric',\n    iou_thrs=0.5,\n    ioa_thrs=0.5,\n    use_group_of=True,\n    get_supercategory=True)\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/semi_coco_detection.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ncolor_space = [\n    [dict(type='ColorTransform')],\n    [dict(type='AutoContrast')],\n    [dict(type='Equalize')],\n    [dict(type='Sharpness')],\n    [dict(type='Posterize')],\n    [dict(type='Solarize')],\n    [dict(type='Color')],\n    [dict(type='Contrast')],\n    [dict(type='Brightness')],\n]\n\ngeometric = [\n    [dict(type='Rotate')],\n    [dict(type='ShearX')],\n    [dict(type='ShearY')],\n    [dict(type='TranslateX')],\n    [dict(type='TranslateY')],\n]\n\nscale = [(1333, 400), (1333, 1200)]\n\nbranch_field = ['sup', 'unsup_teacher', 'unsup_student']\n# pipeline used to augment labeled data,\n# which will be sent to student model for supervised training.\nsup_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='RandomResize', scale=scale, keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='RandAugment', aug_space=color_space, aug_num=1),\n    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),\n    dict(\n        type='MultiBranch',\n        branch_field=branch_field,\n        sup=dict(type='PackDetInputs'))\n]\n\n# pipeline used to augment unlabeled data weakly,\n# which will be sent to teacher model for predicting pseudo instances.\nweak_pipeline = [\n    dict(type='RandomResize', scale=scale, keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor', 'flip', 'flip_direction',\n                   'homography_matrix')),\n]\n\n# pipeline used to augment unlabeled data strongly,\n# which will be sent to student model for unsupervised training.\nstrong_pipeline = [\n    dict(type='RandomResize', scale=scale, keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(\n        type='RandomOrder',\n        transforms=[\n            dict(type='RandAugment', aug_space=color_space, aug_num=1),\n            dict(type='RandAugment', aug_space=geometric, aug_num=1),\n        ]),\n    dict(type='RandomErasing', n_patches=(1, 5), ratio=(0, 0.2)),\n    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor', 'flip', 'flip_direction',\n                   'homography_matrix')),\n]\n\n# pipeline used to augment unlabeled data into different views\nunsup_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadEmptyAnnotations'),\n    dict(\n        type='MultiBranch',\n        branch_field=branch_field,\n        unsup_teacher=weak_pipeline,\n        unsup_student=strong_pipeline,\n    )\n]\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\nbatch_size = 5\nnum_workers = 5\n# There are two common semi-supervised learning settings on 
the coco dataset：\n# (1) Divide the train2017 into labeled and unlabeled datasets\n# by a fixed percentage, such as 1%, 2%, 5% and 10%.\n# The format of labeled_ann_file and unlabeled_ann_file are\n# instances_train2017.{fold}@{percent}.json, and\n# instances_train2017.{fold}@{percent}-unlabeled.json\n# `fold` is used for cross-validation, and `percent` represents\n# the proportion of labeled data in the train2017.\n# (2) Choose the train2017 as the labeled dataset\n# and unlabeled2017 as the unlabeled dataset.\n# The labeled_ann_file and unlabeled_ann_file are\n# instances_train2017.json and image_info_unlabeled2017.json\n# We use this configuration by default.\nlabeled_dataset = dict(\n    type=dataset_type,\n    data_root=data_root,\n    ann_file='annotations/instances_train2017.json',\n    data_prefix=dict(img='train2017/'),\n    filter_cfg=dict(filter_empty_gt=True, min_size=32),\n    pipeline=sup_pipeline)\n\nunlabeled_dataset = dict(\n    type=dataset_type,\n    data_root=data_root,\n    ann_file='annotations/instances_unlabeled2017.json',\n    data_prefix=dict(img='unlabeled2017/'),\n    filter_cfg=dict(filter_empty_gt=False),\n    pipeline=unsup_pipeline)\n\ntrain_dataloader = dict(\n    batch_size=batch_size,\n    num_workers=num_workers,\n    persistent_workers=True,\n    sampler=dict(\n        type='GroupMultiSourceSampler',\n        batch_size=batch_size,\n        source_ratio=[1, 4]),\n    dataset=dict(\n        type='ConcatDataset', datasets=[labeled_dataset, unlabeled_dataset]))\n\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='annotations/instances_val2017.json',\n        data_prefix=dict(img='val2017/'),\n        test_mode=True,\n        pipeline=test_pipeline))\n\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(\n    type='CocoMetric',\n    ann_file=data_root + 'annotations/instances_val2017.json',\n    metric='bbox',\n    format_only=False)\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/voc0712.py",
    "content": "# dataset settings\ndataset_type = 'VOCDataset'\ndata_root = 'data/VOCdevkit/'\n\n# file_client_args = dict(\n#     backend='petrel',\n#     path_mapping=dict({\n#         './data/': 's3://openmmlab/datasets/detection/',\n#         'data/': 's3://openmmlab/datasets/detection/'\n#     }))\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', scale=(1000, 600), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=file_client_args),\n    dict(type='Resize', scale=(1000, 600), keep_ratio=True),\n    # avoid bboxes being resized\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    batch_size=2,\n    num_workers=2,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    batch_sampler=dict(type='AspectRatioBatchSampler'),\n    dataset=dict(\n        type='RepeatDataset',\n        times=3,\n        dataset=dict(\n            type='ConcatDataset',\n            # VOCDataset will add different `dataset_type` in dataset.metainfo,\n            # which will get error if using ConcatDataset. Adding\n            # `ignore_keys` can avoid this error.\n            ignore_keys=['dataset_type'],\n            datasets=[\n                dict(\n                    type=dataset_type,\n                    data_root=data_root,\n                    ann_file='VOC2007/ImageSets/Main/trainval.txt',\n                    data_prefix=dict(sub_data_root='VOC2007/'),\n                    filter_cfg=dict(\n                        filter_empty_gt=True, min_size=32, bbox_min_size=32),\n                    pipeline=train_pipeline),\n                dict(\n                    type=dataset_type,\n                    data_root=data_root,\n                    ann_file='VOC2012/ImageSets/Main/trainval.txt',\n                    data_prefix=dict(sub_data_root='VOC2012/'),\n                    filter_cfg=dict(\n                        filter_empty_gt=True, min_size=32, bbox_min_size=32),\n                    pipeline=train_pipeline)\n            ])))\n\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=2,\n    persistent_workers=True,\n    drop_last=False,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        ann_file='VOC2007/ImageSets/Main/test.txt',\n        data_prefix=dict(sub_data_root='VOC2007/'),\n        test_mode=True,\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\n# Pascal VOC2007 uses `11points` as default evaluate mode, while PASCAL\n# VOC2012 defaults to use 'area'.\nval_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')\ntest_evaluator = val_evaluator\n"
  },
  {
    "path": "configs/_base_/datasets/wider_face.py",
    "content": "# dataset settings\ndataset_type = 'WIDERFaceDataset'\ndata_root = 'data/WIDERFace/'\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', to_float32=True),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PhotoMetricDistortion',\n        brightness_delta=32,\n        contrast_range=(0.5, 1.5),\n        saturation_range=(0.5, 1.5),\n        hue_delta=18),\n    dict(\n        type='Expand',\n        mean=img_norm_cfg['mean'],\n        to_rgb=img_norm_cfg['to_rgb'],\n        ratio_range=(1, 4)),\n    dict(\n        type='MinIoURandomCrop',\n        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n        min_crop_size=0.3),\n    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),\n    dict(type='Normalize', **img_norm_cfg),\n    dict(type='RandomFlip', flip_ratio=0.5),\n    dict(type='DefaultFormatBundle'),\n    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(300, 300),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=False),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ])\n]\ndata = dict(\n    samples_per_gpu=60,\n    workers_per_gpu=2,\n    train=dict(\n        type='RepeatDataset',\n        times=2,\n        dataset=dict(\n            type=dataset_type,\n            ann_file=data_root + 'train.txt',\n            img_prefix=data_root + 'WIDER_train/',\n            min_size=17,\n            pipeline=train_pipeline)),\n    val=dict(\n        type=dataset_type,\n        ann_file=data_root + 'val.txt',\n        img_prefix=data_root + 'WIDER_val/',\n        pipeline=test_pipeline),\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'val.txt',\n        img_prefix=data_root + 'WIDER_val/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "configs/_base_/default_runtime.py",
    "content": "default_scope = 'mmdet'\n\ndefault_hooks = dict(\n    timer=dict(type='IterTimerHook'),\n    logger=dict(type='LoggerHook', interval=50),\n    param_scheduler=dict(type='ParamSchedulerHook'),\n    checkpoint=dict(type='CheckpointHook', interval=1),\n    sampler_seed=dict(type='DistSamplerSeedHook'),\n    visualization=dict(type='DetVisualizationHook'))\n\nenv_cfg = dict(\n    cudnn_benchmark=False,\n    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n    dist_cfg=dict(backend='nccl'),\n)\n\nvis_backends = [dict(type='LocalVisBackend')]\nvisualizer = dict(\n    type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')\nlog_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)\n\nlog_level = 'INFO'\nload_from = None\nresume = False\n"
  },
  {
    "path": "configs/_base_/models/cascade-mask-rcnn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='CascadeRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_mask=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=256,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[8],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n    roi_head=dict(\n        type='CascadeRoIHead',\n        num_stages=3,\n        stage_loss_weights=[1, 0.5, 0.25],\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        bbox_head=[\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.1, 0.1, 0.2, 0.2]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.05, 0.05, 0.1, 0.1]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.033, 0.033, 0.067, 
0.067]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n        ],\n        mask_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        mask_head=dict(\n            type='FCNMaskHead',\n            num_convs=4,\n            in_channels=256,\n            conv_out_channels=256,\n            num_classes=80,\n            loss_mask=dict(\n                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=0,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=2000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=[\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.5,\n                    min_pos_iou=0.5,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.6,\n                    neg_iou_thr=0.6,\n                    min_pos_iou=0.6,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.7,\n                    min_pos_iou=0.7,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                pos_weight=-1,\n                debug=False)\n        ]),\n    test_cfg=dict(\n        rpn=dict(\n            
nms_pre=1000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100,\n            mask_thr_binary=0.5)))\n"
  },
  {
    "path": "configs/_base_/models/cascade-rcnn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='CascadeRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=256,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[8],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),\n    roi_head=dict(\n        type='CascadeRoIHead',\n        num_stages=3,\n        stage_loss_weights=[1, 0.5, 0.25],\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        bbox_head=[\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.1, 0.1, 0.2, 0.2]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.05, 0.05, 0.1, 0.1]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='Shared2FCBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=80,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.033, 0.033, 0.067, 0.067]),\n                
reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n        ]),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=0,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=2000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=[\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.5,\n                    min_pos_iou=0.5,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.6,\n                    neg_iou_thr=0.6,\n                    min_pos_iou=0.6,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.7,\n                    min_pos_iou=0.7,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                pos_weight=-1,\n                debug=False)\n        ]),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=1000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n"
  },
  {
    "path": "configs/_base_/models/fast-rcnn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='FastRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    roi_head=dict(\n        type='StandardRoIHead',\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        bbox_head=dict(\n            type='Shared2FCBBoxHead',\n            in_channels=256,\n            fc_out_channels=1024,\n            roi_feat_size=7,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rcnn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=False,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n"
  },
  {
    "path": "configs/_base_/models/faster-rcnn_r50-caffe-c4.py",
    "content": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='FasterRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=3,\n        strides=(1, 2, 2),\n        dilations=(1, 1, 1),\n        out_indices=(2, ),\n        frozen_stages=1,\n        norm_cfg=norm_cfg,\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=1024,\n        feat_channels=1024,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[2, 4, 8, 16, 32],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[16]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    roi_head=dict(\n        type='StandardRoIHead',\n        shared_head=dict(\n            type='ResLayer',\n            depth=50,\n            stage=3,\n            stride=2,\n            dilation=1,\n            style='caffe',\n            norm_cfg=norm_cfg,\n            norm_eval=True,\n            init_cfg=dict(\n                type='Pretrained',\n                checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),\n            out_channels=1024,\n            featmap_strides=[16]),\n        bbox_head=dict(\n            type='BBoxHead',\n            with_avg_pool=True,\n            roi_feat_size=7,\n            in_channels=2048,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=12000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=False,\n            
    ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=6000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n"
  },
  {
    "path": "configs/_base_/models/faster-rcnn_r50-caffe-dc5.py",
    "content": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='FasterRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        strides=(1, 2, 2, 1),\n        dilations=(1, 1, 1, 2),\n        out_indices=(3, ),\n        frozen_stages=1,\n        norm_cfg=norm_cfg,\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=2048,\n        feat_channels=2048,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[2, 4, 8, 16, 32],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[16]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    roi_head=dict(\n        type='StandardRoIHead',\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=2048,\n            featmap_strides=[16]),\n        bbox_head=dict(\n            type='Shared2FCBBoxHead',\n            in_channels=2048,\n            fc_out_channels=1024,\n            roi_feat_size=7,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=0,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=12000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=False,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms=dict(type='nms', 
iou_threshold=0.7),\n            nms_pre=6000,\n            max_per_img=1000,\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n"
  },
  {
    "path": "configs/_base_/models/faster-rcnn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='FasterRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=256,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[8],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    roi_head=dict(\n        type='StandardRoIHead',\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        bbox_head=dict(\n            type='Shared2FCBBoxHead',\n            in_channels=256,\n            fc_out_channels=1024,\n            roi_feat_size=7,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=2000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=False,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            
nms_pre=1000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        # soft-nms is also supported for rcnn testing\n        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)\n    ))\n"
  },
  {
    "path": "configs/_base_/models/mask-rcnn_r50-caffe-c4.py",
    "content": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='MaskRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_mask=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=3,\n        strides=(1, 2, 2),\n        dilations=(1, 1, 1),\n        out_indices=(2, ),\n        frozen_stages=1,\n        norm_cfg=norm_cfg,\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=1024,\n        feat_channels=1024,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[2, 4, 8, 16, 32],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[16]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    roi_head=dict(\n        type='StandardRoIHead',\n        shared_head=dict(\n            type='ResLayer',\n            depth=50,\n            stage=3,\n            stride=2,\n            dilation=1,\n            style='caffe',\n            norm_cfg=norm_cfg,\n            norm_eval=True),\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),\n            out_channels=1024,\n            featmap_strides=[16]),\n        bbox_head=dict(\n            type='BBoxHead',\n            with_avg_pool=True,\n            roi_feat_size=7,\n            in_channels=2048,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n        mask_roi_extractor=None,\n        mask_head=dict(\n            type='FCNMaskHead',\n            num_convs=0,\n            in_channels=2048,\n            conv_out_channels=256,\n            num_classes=80,\n            loss_mask=dict(\n                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=0,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=12000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            
assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=False,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            mask_size=14,\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=6000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            max_per_img=1000,\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100,\n            mask_thr_binary=0.5)))\n"
  },
  {
    "path": "configs/_base_/models/mask-rcnn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='MaskRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_mask=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=256,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[8],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    roi_head=dict(\n        type='StandardRoIHead',\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        bbox_head=dict(\n            type='Shared2FCBBoxHead',\n            in_channels=256,\n            fc_out_channels=1024,\n            roi_feat_size=7,\n            num_classes=80,\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[0., 0., 0., 0.],\n                target_stds=[0.1, 0.1, 0.2, 0.2]),\n            reg_class_agnostic=False,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n        mask_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        mask_head=dict(\n            type='FCNMaskHead',\n            num_convs=4,\n            in_channels=256,\n            conv_out_channels=256,\n            num_classes=80,\n            loss_mask=dict(\n                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=2000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            
assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.5,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=512,\n                pos_fraction=0.25,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=True),\n            mask_size=28,\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=1000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100,\n            mask_thr_binary=0.5)))\n"
  },
  {
    "path": "configs/_base_/models/retinanet_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='RetinaNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_input',\n        num_outs=5),\n    bbox_head=dict(\n        type='RetinaHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.4,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100))\n"
  },
  {
    "path": "configs/_base_/models/rpn_r50-caffe-c4.py",
    "content": "# model settings\nmodel = dict(\n    type='RPN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=3,\n        strides=(1, 2, 2),\n        dilations=(1, 1, 1),\n        out_indices=(2, ),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    neck=None,\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=1024,\n        feat_channels=1024,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[2, 4, 8, 16, 32],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[16]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=12000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0)))\n"
  },
  {
    "path": "configs/_base_/models/rpn_r50_fpn.py",
    "content": "# model settings\nmodel = dict(\n    type='RPN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=5),\n    rpn_head=dict(\n        type='RPNHead',\n        in_channels=256,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            scales=[8],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False)),\n    test_cfg=dict(\n        rpn=dict(\n            nms_pre=2000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0)))\n"
  },
  {
    "path": "configs/_base_/models/ssd300.py",
    "content": "# model settings\ninput_size = 300\nmodel = dict(\n    type='SingleStageDetector',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[1, 1, 1],\n        bgr_to_rgb=True,\n        pad_size_divisor=1),\n    backbone=dict(\n        type='SSDVGG',\n        depth=16,\n        with_last_pool=False,\n        ceil_mode=True,\n        out_indices=(3, 4),\n        out_feature_indices=(22, 34),\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),\n    neck=dict(\n        type='SSDNeck',\n        in_channels=(512, 1024),\n        out_channels=(512, 1024, 512, 256, 256, 256),\n        level_strides=(2, 2, 1, 1),\n        level_paddings=(1, 1, 0, 0),\n        l2_norm_scale=20),\n    bbox_head=dict(\n        type='SSDHead',\n        in_channels=(512, 1024, 512, 256, 256, 256),\n        num_classes=80,\n        anchor_generator=dict(\n            type='SSDAnchorGenerator',\n            scale_major=False,\n            input_size=input_size,\n            basesize_ratio_range=(0.15, 0.9),\n            strides=[8, 16, 32, 64, 100, 300],\n            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.1, 0.1, 0.2, 0.2])),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n            min_pos_iou=0.,\n            ignore_iof_thr=-1,\n            gt_max_assign_all=False),\n        sampler=dict(type='PseudoSampler'),\n        smoothl1_beta=1.,\n        allowed_border=-1,\n        pos_weight=-1,\n        neg_pos_ratio=3,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        nms=dict(type='nms', iou_threshold=0.45),\n        min_bbox_size=0,\n        score_thr=0.02,\n        max_per_img=200))\ncudnn_benchmark = True\n"
  },
  {
    "path": "configs/_base_/schedules/schedule_1x.py",
    "content": "# training schedule for 1x\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)\nval_cfg = dict(type='ValLoop')\ntest_cfg = dict(type='TestLoop')\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=12,\n        by_epoch=True,\n        milestones=[8, 11],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))\n\n# Default setting for scaling LR automatically\n#   - `enable` means enable scaling LR automatically\n#       or not by default.\n#   - `base_batch_size` = (8 GPUs) x (2 samples per GPU).\nauto_scale_lr = dict(enable=False, base_batch_size=16)\n"
  },
  {
    "path": "configs/_base_/schedules/schedule_20e.py",
    "content": "# training schedule for 20e\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)\nval_cfg = dict(type='ValLoop')\ntest_cfg = dict(type='TestLoop')\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=20,\n        by_epoch=True,\n        milestones=[16, 19],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))\n\n# Default setting for scaling LR automatically\n#   - `enable` means enable scaling LR automatically\n#       or not by default.\n#   - `base_batch_size` = (8 GPUs) x (2 samples per GPU).\nauto_scale_lr = dict(enable=False, base_batch_size=16)\n"
  },
  {
    "path": "configs/_base_/schedules/schedule_2x.py",
    "content": "# training schedule for 2x\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)\nval_cfg = dict(type='ValLoop')\ntest_cfg = dict(type='TestLoop')\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=24,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))\n\n# Default setting for scaling LR automatically\n#   - `enable` means enable scaling LR automatically\n#       or not by default.\n#   - `base_batch_size` = (8 GPUs) x (2 samples per GPU).\nauto_scale_lr = dict(enable=False, base_batch_size=16)\n"
  },
  {
    "path": "configs/atss/README.md",
    "content": "# ATSS\n\n> [Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection](https://arxiv.org/abs/1912.02424)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nObject detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143870776-c81168f5-e8b2-44ee-978b-509e4372c5c9.png\"/>\n</div>\n\n## Results and Models\n\n| Backbone |  Style  | Lr schd | Mem (GB) | Inf time (fps) | box AP |                Config                |                                                                                                                            Download                                                                                                                             |\n| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|   R-50   | pytorch |   1x    |   3.7    |      19.7      |  39.4  | [config](./atss_r50_fpn_1x_coco.py)  | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) |\n|  R-101   | pytorch |   1x    |   5.6    |      12.3      |  41.5  | [config](./atss_r101_fpn_1x_coco.py) |   [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json)   |\n\n## Citation\n\n```latex\n@article{zhang2019bridging,\n  title   =  {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection},\n  author  =  {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.},\n  journal =  {arXiv preprint arXiv:1912.02424},\n  year    =  {2019}\n}\n```\n"
  },
  {
    "path": "configs/atss/atss_r101_fpn_1x_coco.py",
    "content": "_base_ = './atss_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/atss/atss_r101_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/atss/atss_r18_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\n"
  },
  {
    "path": "configs/atss/atss_r50_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\n# model settings\nmodel = dict(\n    type='ATSS',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='ATSSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.1, 0.1, 0.2, 0.2]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    # training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/atss/atss_r50_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = '../common/lsj-200e_coco-detection.py'\n\nimage_size = (1024, 1024)\nbatch_augments = [dict(type='BatchFixedSizePad', size=image_size)]\n\nmodel = dict(\n    type='ATSS',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32,\n        batch_augments=batch_augments),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='ATSSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.1, 0.1, 0.2, 0.2]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    # training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\ntrain_dataloader = dict(batch_size=8, num_workers=4)\n\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    optimizer=dict(\n        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n"
  },
  {
    "path": "configs/atss/metafile.yml",
    "content": "Collections:\n  - Name: ATSS\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x V100 GPUs\n      Architecture:\n        - ATSS\n        - FPN\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/1912.02424\n      Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection'\n    README: configs/atss/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6\n      Version: v2.0.0\n\nModels:\n  - Name: atss_r50_fpn_1x_coco\n    In Collection: ATSS\n    Config: configs/atss/atss_r50_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.7\n      inference time (ms/im):\n        - value: 50.76\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 39.4\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth\n\n  - Name: atss_r101_fpn_1x_coco\n    In Collection: ATSS\n    Config: configs/atss/atss_r101_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 5.6\n      inference time (ms/im):\n        - value: 81.3\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 41.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth\n"
  },
  {
    "path": "configs/centernet/README.md",
    "content": "# CenterNet\n\n> [Objects as Points](https://arxiv.org/abs/1904.07850)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nDetection identifies objects as axis-aligned boxes in an image. Most successful object detectors enumerate a nearly exhaustive list of potential object locations and classify each. This is wasteful, inefficient, and requires additional post-processing. In this paper, we take a different approach. We model an object as a single point --- the center point of its bounding box. Our detector uses keypoint estimation to find center points and regresses to all other object properties, such as size, 3D location, orientation, and even pose. Our center point based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding box based detectors. CenterNet achieves the best speed-accuracy trade-off on the MS COCO dataset, with 28.1% AP at 142 FPS, 37.4% AP at 52 FPS, and 45.1% AP with multi-scale testing at 1.4 FPS. We use the same approach to estimate 3D bounding box in the KITTI benchmark and human pose on the COCO keypoint dataset. Our method performs competitively with sophisticated multi-stage methods and runs in real-time.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143873810-85ffa6e7-915b-46a4-9b8f-709e5d7700bb.png\"/>\n</div>\n\n## Results and Models\n\n| Backbone  | DCN | Mem (GB) | Box AP | Flip box AP |                           Config                           |                                                                                                                                                                 Download                                                                                                                                                                 |\n| :-------: | :-: | :------: | :----: | :---------: | :--------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n| ResNet-18 |  N  |   3.45   |  25.9  |    27.3     |    [config](./centernet_r18_8xb16-crop512-140e_coco.py)    |             [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630.log.json)             |\n| ResNet-18 |  Y  |   3.47   |  29.5  |    30.9     | [config](./centernet_r18-dcnv2_8xb16-crop512-140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131.log.json) |\n\nNote:\n\n- Flip box AP setting is single-scale and `flip=True`.\n- Due to complex data enhancement, we find that the performance is unstable and may fluctuate by about 0.4 mAP. 
An mAP of 29.4 ~ 29.8 is acceptable for ResNet-18-DCNv2.\n- Compared to the official source code, we refer to [CenterNet-Better](https://github.com/FateScript/CenterNet-better) and make the following changes:\n  - Fix the wrong image mean and variance in image normalization to be compatible with the pre-trained backbone.\n  - Use the SGD optimizer rather than Adam, and add warmup and gradient clipping.\n  - Use DistributedDataParallel, as for other models in MMDetection, rather than DataParallel.\n\n## CenterNet Update\n\n| Backbone  | Style | Lr schd | MS train | Mem (GB) | Box AP |                          Config                          |         Download         |\n| :-------: | :---: | :-----: | :------: | :------: | :----: | :------------------------------------------------------: | :----------------------: |\n| ResNet-50 | caffe |   1x    |   True   |   3.3    |  40.2  | [config](./centernet-update_r50-caffe_fpn_ms-1x_coco.py) | [model](<>) \\| [log](<>) |\n\nCenterNet Update comes from the paper [Probabilistic two-stage detection](https://arxiv.org/abs/2103.07461), in which the author updates CenterNet to greatly improve its performance and convergence speed.\nThe [details](https://github.com/xingyizhou/CenterNet2/blob/master/docs/MODEL_ZOO.md) are as follows:\n\n- Using top-left-right-bottom box encoding and GIoU Loss\n- Adding regression loss to the center 3x3 region\n- Adding more positive pixels for the heatmap loss, whose regression loss is small and which lie within the center 3x3 region\n- Using a RetinaNet-style optimizer (SGD), learning rate rule (0.01 for a batch size of 16), and schedule (12 epochs)\n- Adding FPN neck layers and assigning objects to FPN levels based on a fixed size range\n- Using standard NMS instead of max pooling\n\n## Citation\n\n```latex\n@article{zhou2019objects,\n  title={Objects as Points},\n  author={Zhou, Xingyi and Wang, Dequan and Kr{\\\"a}henb{\\\"u}hl, Philipp},\n  booktitle={arXiv preprint arXiv:1904.07850},\n  year={2019}\n}\n```\n"
  },
  {
    "path": "configs/centernet/centernet-update_r101_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/centernet/centernet-update_r18_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\n"
  },
  {
    "path": "configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\nmodel = dict(\n    type='CenterNet',\n    # use caffe img_norm\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5,\n        # There is a chance to get 40.3 after switching init_cfg,\n        # otherwise it is about 39.9~40.1\n        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='CenterNetUpdateHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        hm_min_radius=4,\n        hm_min_overlap=0.8,\n        more_pos_thresh=0.2,\n        more_pos_topk=9,\n        soft_weight_on_reg=False,\n        loss_cls=dict(\n            type='GaussianFocalLoss',\n            pos_weight=0.25,\n            neg_weight=0.75,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n    ),\n    train_cfg=None,\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\n# single-scale training is about 39.3\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),\n                (1333, 768), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR',\n        start_factor=0.00025,\n        by_epoch=False,\n        begin=0,\n        end=4000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=12,\n        by_epoch=True,\n        milestones=[8, 11],\n        gamma=0.1)\n]\n\noptim_wrapper = dict(\n    optimizer=dict(lr=0.01),\n    # Experiments show that there is no need to turn on clip_grad.\n    paramwise_cfg=dict(norm_decay_mult=0.))\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (2 samples per GPU)\nauto_scale_lr = dict(base_batch_size=16)\n"
  },
  {
    "path": "configs/centernet/centernet-update_r50_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = '../common/lsj-200e_coco-detection.py'\n\nimage_size = (1024, 1024)\nbatch_augments = [dict(type='BatchFixedSizePad', size=image_size)]\n\nmodel = dict(\n    type='CenterNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32,\n        batch_augments=batch_augments),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5,\n        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='CenterNetUpdateHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        loss_cls=dict(\n            type='GaussianFocalLoss',\n            pos_weight=0.25,\n            neg_weight=0.75,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n    ),\n    train_cfg=None,\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\ntrain_dataloader = dict(batch_size=8, num_workers=4)\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    optimizer=dict(\n        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),\n    paramwise_cfg=dict(norm_decay_mult=0.))\n\nparam_scheduler = [\n    dict(\n        type='LinearLR',\n        start_factor=0.00025,\n        by_epoch=False,\n        begin=0,\n        end=4000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=25,\n        by_epoch=True,\n        milestones=[22, 24],\n        gamma=0.1)\n]\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n"
  },
  {
    "path": "configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',\n    './centernet_tta.py'\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\n# model settings\nmodel = dict(\n    type='CenterNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True),\n    backbone=dict(\n        type='ResNet',\n        depth=18,\n        norm_eval=False,\n        norm_cfg=dict(type='BN'),\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(\n        type='CTResNetNeck',\n        in_channels=512,\n        num_deconv_filters=(256, 128, 64),\n        num_deconv_kernels=(4, 4, 4),\n        use_dcn=True),\n    bbox_head=dict(\n        type='CenterNetHead',\n        num_classes=80,\n        in_channels=64,\n        feat_channels=64,\n        loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),\n        loss_wh=dict(type='L1Loss', loss_weight=0.1),\n        loss_offset=dict(type='L1Loss', loss_weight=1.0)),\n    train_cfg=None,\n    test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))\n\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PhotoMetricDistortion',\n        brightness_delta=32,\n        contrast_range=(0.5, 1.5),\n        saturation_range=(0.5, 1.5),\n        hue_delta=18),\n    dict(\n        type='RandomCenterCropPad',\n        # The cropped images are padded into squares during training,\n        # but may be less than crop_size.\n        crop_size=(512, 512),\n        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),\n        mean=[0, 0, 0],\n        std=[1, 1, 1],\n        to_rgb=True,\n        test_pad_mode=None),\n    # Make sure the output is always crop_size.\n    dict(type='Resize', scale=(512, 512), keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        to_float32=True,\n        file_client_args={{_base_.file_client_args}}),\n    # don't need Resize\n    dict(\n        type='RandomCenterCropPad',\n        ratios=None,\n        border=None,\n        mean=[0, 0, 0],\n        std=[1, 1, 1],\n        to_rgb=True,\n        test_mode=True,\n        test_pad_mode=['logical_or', 31],\n        test_pad_add_pix=1),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))\n]\n\n# Use RepeatDataset to speed up training\ntrain_dataloader = dict(\n    batch_size=16,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=True),\n    dataset=dict(\n        _delete_=True,\n        type='RepeatDataset',\n        times=5,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file='annotations/instances_train2017.json',\n            data_prefix=dict(img='train2017/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=train_pipeline)))\n\nval_dataloader = dict(dataset=dict(pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\n# optimizer\n# Based on the default settings of modern detectors, the SGD effect is better\n# than 
the Adam in the source code, so we use the default SGD settings;\n# with Adam and lr=5e-4, the mAP is 29.1.\noptim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))\n\nmax_epochs = 28\n# learning policy\n# Based on the default settings of modern detectors, we added warmup settings.\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,\n        end=1000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[18, 24],  # the actual steps are [18*5, 24*5]\n        gamma=0.1)\n]\ntrain_cfg = dict(max_epochs=max_epochs)  # the actual epochs are 28*5=140\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (16 samples per GPU)\nauto_scale_lr = dict(base_batch_size=128)\n"
  },
  {
    "path": "configs/centernet/centernet_r18_8xb16-crop512-140e_coco.py",
    "content": "_base_ = './centernet_r18-dcnv2_8xb16-crop512-140e_coco.py'\n\nmodel = dict(neck=dict(use_dcn=False))\n"
  },
  {
    "path": "configs/centernet/centernet_tta.py",
    "content": "# This is different from the TTA of official CenterNet.\n\ntta_model = dict(\n    type='DetTTAModel',\n    tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))\n\ntta_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        to_float32=True,\n        file_client_args=dict(backend='disk')),\n    dict(\n        type='TestTimeAug',\n        transforms=[\n            [\n                # ``RandomFlip`` must be placed before ``RandomCenterCropPad``,\n                # otherwise bounding box coordinates after flipping cannot be\n                # recovered correctly.\n                dict(type='RandomFlip', prob=1.),\n                dict(type='RandomFlip', prob=0.)\n            ],\n            [\n                dict(\n                    type='RandomCenterCropPad',\n                    ratios=None,\n                    border=None,\n                    mean=[0, 0, 0],\n                    std=[1, 1, 1],\n                    to_rgb=True,\n                    test_mode=True,\n                    test_pad_mode=['logical_or', 31],\n                    test_pad_add_pix=1),\n            ],\n            [dict(type='LoadAnnotations', with_bbox=True)],\n            [\n                dict(\n                    type='PackDetInputs',\n                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                               'flip', 'flip_direction', 'border'))\n            ]\n        ])\n]\n"
  },
  {
    "path": "configs/centernet/metafile.yml",
    "content": "Collections:\n  - Name: CenterNet\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x TITANXP GPUs\n      Architecture:\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/1904.07850\n      Title: 'Objects as Points'\n    README: configs/centernet/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.13.0/mmdet/models/detectors/centernet.py#L10\n      Version: v2.13.0\n\nModels:\n  - Name: centernet_r18-dcnv2_8xb16-crop512-140e_coco\n    In Collection: CenterNet\n    Config: configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py\n    Metadata:\n      Batch Size: 128\n      Training Memory (GB): 3.47\n      Epochs: 140\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 29.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth\n\n  - Name: centernet_r18_8xb16-crop512-140e_coco\n    In Collection: CenterNet\n    Config: configs/centernet/centernet_r18_8xb16-crop512-140e_coco.py\n    Metadata:\n      Batch Size: 128\n      Training Memory (GB): 3.45\n      Epochs: 140\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 25.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth\n"
  },
  {
    "path": "configs/crosskd/crosskd_r18_gflv1_r50_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth'  # noqa\nmodel = dict(\n    type='CrossKDGFL',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/gfl/gfl_r50_fpn_1x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=18,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(\n        type='FPN',\n        in_channels=[64, 128, 256, 512],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='GFLHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            beta=2.0,\n            loss_weight=1.0),\n        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n        reg_max=16,\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(\n            type='KnowledgeDistillationKLDivLoss',\n            class_reduction='sum',\n            T=1,\n            loss_weight=4.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\n"
  },
  {
    "path": "configs/crosskd/crosskd_r18_retinanet_r50_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'  # noqa\nmodel = dict(\n    type='CrossKDRetinaNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=18,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(\n        type='FPN',\n        in_channels=[64, 128, 256, 512],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='RetinaHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.4,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100))\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\n"
  },
  {
    "path": "configs/crosskd/crosskd_r50_atss_r101_fpn_1x_coco.py",
    "content": "\n_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', \n    '../_base_/default_runtime.py'\n]\n\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth'\n\ndata_preprocessor = dict(\n    type='DetDataPreprocessor',\n    mean=[123.675, 116.28, 103.53],\n    std=[58.395, 57.12, 57.375],\n    bgr_to_rgb=True,\n    pad_size_divisor=32\n    )\n\nmodel = dict(\n    type='CrossKDATSS',\n    data_preprocessor=data_preprocessor,\n    teacher_config='configs/atss/atss_r101_fpn_1x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='ATSSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[0.0, 0.0, 0.0, 0.0],\n            target_stds=[0.1, 0.1, 0.2, 0.2]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        loss_center_kd=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        reused_teacher_head_idx=3),\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100)\n    )\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)\n    )\ntrain_dataloader = dict(batch_size=8, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)"
  },
  {
    "path": "configs/crosskd/crosskd_r50_fcos_r101-2x-ms_caffe_fpn_gn-head_2x_ms_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', \n    '../_base_/default_runtime.py'\n]\n\ndata_preprocessor = dict(type='DetDataPreprocessor',\n                         mean=[102.9801, 115.9465, 122.7717],\n                         std=[1.0, 1.0, 1.0],\n                         bgr_to_rgb=False,\n                         pad_size_divisor=32)\n\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth'\nmodel = dict(\n    type='CrossKDFCOS',\n    data_preprocessor=data_preprocessor,\n    teacher_config='configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron/resnet50_caffe')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5,\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='FCOSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=0.4),\n        loss_reg_kd=dict(type='IoULoss', loss_weight=0.75),\n        reused_teacher_head_idx=2),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100))\n\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scales=[(1333, 640), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline),\n                        batch_size=2, \n                        num_workers=4)\n\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(lr=0.01),\n    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),\n    clip_grad=dict(max_norm=35, norm_type=2))\n\n# training schedule for 2x\nmax_epochs = 24\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))\nauto_scale_lr = dict(enable=True, base_batch_size=16)\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='ConstantLR',\n        factor=0.3333333333333333,\n        by_epoch=False,\n        begin=0,\n        end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=24,\n        by_epoch=True,\n        
milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/crosskd/crosskd_r50_gflv1_r101-2x-ms_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth'  # noqa\nmodel = dict(\n    type='CrossKDGFL',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='GFLHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            beta=2.0,\n            loss_weight=1.0),\n        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n        reg_max=16,\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(\n            type='KnowledgeDistillationKLDivLoss',\n            class_reduction='sum',\n            T=1,\n            loss_weight=4.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\n"
  },
  {
    "path": "configs/crosskd/crosskd_r50_retinanet_r101_fpn_2x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_2x.py', \n    '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth'  # noqa\nmodel = dict(\n    type='CrossKDRetinaNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/retinanet/retinanet_r101_fpn_2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='RetinaHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.4,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100)\n    )\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\n"
  },
  {
    "path": "configs/crosskd/crosskd_r50_retinanet_swint_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', \n    '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'retinanet_swin-t-p4-w7_fpn_1x_coco.pth'\nmodel = dict(\n    type='CrossKDRetinaNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='RetinaHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.4,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100)\n    )\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\n"
  },
  {
    "path": "configs/crosskd+pkd/crosskd+pkd_r50_atss_r101_fpn_1x_coco.py",
    "content": "\n_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', \n    '../_base_/default_runtime.py'\n]\n\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth'\n\ndata_preprocessor = dict(\n    type='DetDataPreprocessor',\n    mean=[123.675, 116.28, 103.53],\n    std=[58.395, 57.12, 57.375],\n    bgr_to_rgb=True,\n    pad_size_divisor=32)\n\nmodel = dict(\n    type='CrossKDATSS',\n    data_preprocessor=data_preprocessor,\n    teacher_config='configs/atss/atss_r101_fpn_1x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='ATSSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[0.0, 0.0, 0.0, 0.0],\n            target_stds=[0.1, 0.1, 0.2, 0.2]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        loss_center_kd=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_feat_kd=dict(type='PKDLoss', loss_weight=1),\n        reused_teacher_head_idx=3),\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\ntrain_dataloader = dict(batch_size=8, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)"
  },
  {
    "path": "configs/crosskd+pkd/crosskd+pkd_r50_fcos_r101-2x-ms_caffe_fpn_gn-head_2x_ms_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', \n    '../_base_/default_runtime.py'\n]\n\ndata_preprocessor = dict(type='DetDataPreprocessor',\n                         mean=[102.9801, 115.9465, 122.7717],\n                         std=[1.0, 1.0, 1.0],\n                         bgr_to_rgb=False,\n                         pad_size_divisor=32)\n\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth'\nmodel = dict(\n    type='CrossKDFCOS',\n    data_preprocessor=data_preprocessor,\n    teacher_config='configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron/resnet50_caffe')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5,\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='FCOSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=0.4),\n        loss_reg_kd=dict(type='IoULoss', loss_weight=0.75),\n        loss_feat_kd=dict(type='PKDLoss', loss_weight=2),\n        reused_teacher_head_idx=2),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100))\n\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scales=[(1333, 640), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline),\n                        batch_size=2, \n                        num_workers=4)\n\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(lr=0.01),\n    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),\n    clip_grad=dict(max_norm=35, norm_type=2))\n\n# training schedule for 2x\nmax_epochs = 24\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=2))\nauto_scale_lr = dict(enable=True, base_batch_size=16)\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='ConstantLR',\n        factor=0.3333333333333333,\n        by_epoch=False,\n        begin=0,\n        end=500),\n    dict(\n        type='MultiStepLR',\n        
begin=0,\n        end=24,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/crosskd+pkd/crosskd+pkd_r50_gflv1_r101-2x-ms_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth'  # noqa\nmodel = dict(\n    type='CrossKDGFL',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='GFLHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            beta=2.0,\n            loss_weight=1.0),\n        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n        reg_max=16,\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(\n            type='KnowledgeDistillationKLDivLoss',\n            class_reduction='sum',\n            T=1,\n            loss_weight=4.0),\n        loss_feat_kd=dict(type='PKDLoss', loss_weight=6.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\n"
  },
  {
    "path": "configs/crosskd+pkd/crosskd+pkd_r50_retinanet_r101_fpn_2x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_2x.py', \n    '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth'  # noqa\nmodel = dict(\n    type='CrossKDRetinaNet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/retinanet/retinanet_r101_fpn_2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='RetinaHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n    kd_cfg=dict(\n        loss_cls_kd=dict(type='KDQualityFocalLoss', beta=1, loss_weight=1.0),\n        loss_reg_kd=dict(type='GIoULoss', loss_weight=1.0),\n        loss_feat_kd=dict(type='PKDLoss', loss_weight=2.0),\n        reused_teacher_head_idx=3),\n    # model training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.4,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100)\n    )\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\ntrain_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)\ntrain_dataloader = dict(batch_size=2, num_workers=4)\nauto_scale_lr = dict(enable=True, base_batch_size=16)\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=12))\n"
  },
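Both CrossKD configs above attach a feature-imitation term via `loss_feat_kd=dict(type='PKDLoss', ...)` alongside the cross-head distillation losses. As a rough illustration of what a Pearson-correlation style feature KD computes, the sketch below standardizes student and teacher FPN maps before taking an MSE, so only the activation pattern (not its scale) is penalized. The helper name `pkd_like_loss` is ours; the repo's registered `PKDLoss` may differ in normalization axes, weighting and reduction.

```python
import torch
import torch.nn.functional as F


def pkd_like_loss(feat_s: torch.Tensor, feat_t: torch.Tensor,
                  eps: float = 1e-6) -> torch.Tensor:
    """Pearson-correlation style feature imitation (illustrative only).

    feat_s / feat_t: student and teacher FPN maps of shape (N, C, H, W).
    Each channel is standardized to zero mean / unit variance over its
    spatial positions, then an MSE is taken, so the loss measures how well
    the activation *patterns* agree rather than their absolute magnitudes.
    """
    def standardize(x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        x = x.reshape(n, c, h * w)
        x = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + eps)
        return x.reshape(n, c, h, w)

    return F.mse_loss(standardize(feat_s), standardize(feat_t))
```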
  {
    "path": "configs/fcos/README.md",
    "content": "# FCOS\n\n> [FCOS: Fully Convolutional One-Stage Object Detection](https://arxiv.org/abs/1904.01355)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nWe propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143882011-45b234bc-d04b-4bbe-a822-94bec057ac86.png\"/>\n</div>\n\n## Results and Models\n\n| Backbone | Style | GN  | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP |                                         Config                                         |                                                                                                                                                                                          Download                                                                                                                                                                                          |\n| :------: | :---: | :-: | :------: | :----: | :-: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|   R-50   | caffe |  Y  |    N     |   N    |  N  |   1x    |   3.6    |      22.7      |  36.6  |                   [config](./fcos_r50-caffe_fpn_gn-head_1x_coco.py)                    |                                                        [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json)                                                         |\n|   R-50   | caffe |  Y  |    N     |   Y    |  N  |   1x    |   3.7    |       -        |  38.7  |   [config](./fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py)   |       
[model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json)       |\n|   R-50   | caffe |  Y  |    N     |   Y    |  Y  |   1x    |   3.8    |       -        |  42.3  | [config](./fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json) |\n|  R-101   | caffe |  Y  |    N     |   N    |  N  |   1x    |   5.5    |      17.3      |  39.1  |                   [config](./fcos_r101-caffe_fpn_gn-head-1x_coco.py)                   |                                                       [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json)                                                       |\n\n| Backbone |  Style  | GN  | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP |                            Config                             |                                                                                                                                                            Download                                                                                                                                                            |\n| :------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|   R-50   |  caffe  |  Y  |    Y     |   2x    |   2.6    |      22.9      |  38.5  | [config](./fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py)  |  [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json)   |\n|  R-101   |  caffe  |  Y  |    Y     |   2x    |   5.5    |      17.3      |  40.8  | [config](./fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) |\n|  X-101   | pytorch |  Y  |    Y     |   2x    |   10.0   |      9.7       |  42.6  | 
[config](./fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) |\n\n**Notes:**\n\n- The X-101 backbone is X-101-64x4d.\n- Tricks means setting `norm_on_bbox`, `centerness_on_reg`, `center_sampling` as `True`.\n- DCN means using `DCNv2` in both backbone and head.\n\n## Citation\n\n```latex\n@article{tian2019fcos,\n  title={FCOS: Fully Convolutional One-Stage Object Detection},\n  author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong},\n  journal={arXiv preprint arXiv:1904.01355},\n  year={2019}\n}\n```\n"
  },
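The FCOS README above describes detection as per-pixel prediction with a centerness branch, and the "Tricks" note toggles where that branch sits (`centerness_on_reg`) and how positives are sampled (`center_sampling`). For reference, a minimal sketch of the centerness target from the FCOS paper, computed from each location's (left, top, right, bottom) distances to its assigned box; the function name is ours and it assumes the distances are already positive.

```python
import torch


def fcos_centerness_target(ltrb: torch.Tensor) -> torch.Tensor:
    """Centerness target from per-pixel (l, t, r, b) regression targets.

    ltrb: shape (num_points, 4), distances from each location to the four
    sides of its ground-truth box. The paper defines centerness as
        sqrt( min(l, r) / max(l, r) * min(t, b) / max(t, b) ),
    so locations near a box centre score close to 1 and border locations
    close to 0. Illustrative stand-alone helper, not the `FCOSHead` code.
    """
    lr = ltrb[:, [0, 2]]
    tb = ltrb[:, [1, 3]]
    centerness = (lr.min(dim=-1)[0] / lr.max(dim=-1)[0]) * \
                 (tb.min(dim=-1)[0] / tb.max(dim=-1)[0])
    return centerness.sqrt()
```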
  {
    "path": "configs/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py",
    "content": "_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model settings\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron/resnet101_caffe')))\n"
  },
  {
    "path": "configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py",
    "content": "_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model settings\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron/resnet101_caffe')))\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scale=[(1333, 640), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n\n# training schedule for 2x\nmax_epochs = 24\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# learning rate\nparam_scheduler = [\n    dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/fcos/fcos_r101_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py'  # noqa\n\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/fcos/fcos_r18_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py'  # noqa\n\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\n"
  },
  {
    "path": "configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py",
    "content": "_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model setting\nmodel = dict(\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    bbox_head=dict(\n        norm_on_bbox=True,\n        centerness_on_reg=True,\n        dcn_on_last_conv=False,\n        center_sampling=True,\n        conv_bias=True,\n        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),\n    # training and testing settings\n    test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR',\n        start_factor=1.0 / 3.0,\n        by_epoch=False,\n        begin=0,\n        end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=12,\n        by_epoch=True,\n        milestones=[8, 11],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(clip_grad=None)\n"
  },
  {
    "path": "configs/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py",
    "content": "_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model settings\nmodel = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))\n"
  },
  {
    "path": "configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\n# model settings\nmodel = dict(\n    type='FCOS',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[102.9801, 115.9465, 122.7717],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron/resnet50_caffe')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',  # use P5\n        num_outs=5,\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='FCOSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    # testing settings\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.5),\n        max_per_img=100))\n\n# learning rate\nparam_scheduler = [\n    dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=12,\n        by_epoch=True,\n        milestones=[8, 11],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(lr=0.01),\n    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),\n    clip_grad=dict(max_norm=35, norm_type=2))\n"
  },
  {
    "path": "configs/fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py",
    "content": "# TODO: Remove this config after benchmarking all related configs\n_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# dataset settings\ntrain_dataloader = dict(batch_size=4, num_workers=4)\n"
  },
  {
    "path": "configs/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py",
    "content": "_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scale=[(1333, 640), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n\n# training schedule for 2x\nmax_epochs = 24\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# learning rate\nparam_scheduler = [\n    dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py",
    "content": "_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model settings\nmodel = dict(\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),\n        stage_with_dcn=(False, True, True, True),\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')),\n    bbox_head=dict(\n        norm_on_bbox=True,\n        centerness_on_reg=True,\n        dcn_on_last_conv=True,\n        center_sampling=True,\n        conv_bias=True,\n        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),\n    # training and testing settings\n    test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR',\n        start_factor=1.0 / 3.0,\n        by_epoch=False,\n        begin=0,\n        end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=12,\n        by_epoch=True,\n        milestones=[8, 11],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(clip_grad=None)\n"
  },
  {
    "path": "configs/fcos/fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = '../common/lsj-200e_coco-detection.py'\n\nimage_size = (1024, 1024)\nbatch_augments = [dict(type='BatchFixedSizePad', size=image_size)]\n\n# model settings\nmodel = dict(\n    type='FCOS',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32,\n        batch_augments=batch_augments),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',  # use P5\n        num_outs=5,\n        relu_before_extra_convs=True),\n    bbox_head=dict(\n        type='FCOSHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        norm_on_bbox=True,\n        centerness_on_reg=True,\n        dcn_on_last_conv=False,\n        center_sampling=True,\n        conv_bias=True,\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=1.0),\n        loss_centerness=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n    # testing settings\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\ntrain_dataloader = dict(batch_size=8, num_workers=4)\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    optimizer=dict(\n        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),\n    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),\n    clip_grad=dict(max_norm=35, norm_type=2))\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n"
  },
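The LSJ/AMP config above notes that `auto_scale_lr` assumes `base_batch_size = (8 GPUs) x (8 samples per GPU) = 64`. The arithmetic it implies is the usual linear scaling rule; a small sketch, assuming MMEngine's behaviour of multiplying the optimizer LR by the ratio of the actual total batch size to `base_batch_size` when automatic scaling is turned on (for example via `tools/train.py --auto-scale-lr`).

```python
def scale_lr(base_lr: float, num_gpus: int, samples_per_gpu: int,
             base_batch_size: int = 64) -> float:
    """Linear LR scaling implied by `auto_scale_lr` (illustrative only).

    The configured LR is treated as the value for `base_batch_size` images
    per iteration and is rescaled proportionally to the batch size in use.
    """
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size


# e.g. running the LSJ config (lr = 0.01 * 4 = 0.04) on 4 GPUs x 8 images:
# scale_lr(0.04, num_gpus=4, samples_per_gpu=8) -> 0.02
```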
  {
    "path": "configs/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py",
    "content": "_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'\n\n# model settings\nmodel = dict(\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=64,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomChoiceResize',\n        scale=[(1333, 640), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n\n# training schedule for 2x\nmax_epochs = 24\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# learning rate\nparam_scheduler = [\n    dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/fcos/metafile.yml",
    "content": "Collections:\n  - Name: FCOS\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x V100 GPUs\n      Architecture:\n        - FPN\n        - Group Normalization\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/1904.01355\n      Title: 'FCOS: Fully Convolutional One-Stage Object Detection'\n    README: configs/fcos/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fcos.py#L6\n      Version: v2.0.0\n\nModels:\n  - Name: fcos_r50-caffe_fpn_gn-head_1x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.6\n      inference time (ms/im):\n        - value: 44.05\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 36.6\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth\n\n  - Name: fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.7\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 38.7\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth\n\n  - Name: fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.8\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 42.3\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth\n\n  - Name: fcos_r101-caffe_fpn_gn-head-1x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py\n    Metadata:\n      Training Memory (GB): 5.5\n      inference time (ms/im):\n        - value: 57.8\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 39.1\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth\n\n  - Name: fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py\n    Metadata:\n      Training Memory (GB): 2.6\n      inference time (ms/im):\n        - value: 43.67\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n     
 - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 38.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth\n\n  - Name: fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py\n    Metadata:\n      Training Memory (GB): 5.5\n      inference time (ms/im):\n        - value: 57.8\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 40.8\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth\n\n  - Name: fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco\n    In Collection: FCOS\n    Config: configs/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py\n    Metadata:\n      Training Memory (GB): 10.0\n      inference time (ms/im):\n        - value: 103.09\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 42.6\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth\n"
  },
  {
    "path": "configs/gfl/README.md",
    "content": "# GFL\n\n> [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nOne-stage detector basically formulates object detection as dense classification and localization. The classification is usually optimized by Focal Loss and the box location is commonly learned under Dirac delta distribution. A recent trend for one-stage detectors is to introduce an individual prediction branch to estimate the quality of localization, where the predicted quality facilitates the classification to improve detection performance. This paper delves into the representations of the above three fundamental elements: quality estimation, classification and localization. Two problems are discovered in existing practices, including (1) the inconsistent usage of the quality estimation and classification between training and inference and (2) the inflexible Dirac delta distribution for localization when there is ambiguity and uncertainty in complex scenes. To address the problems, we design new representations for these elements. Specifically, we merge the quality estimation into the class prediction vector to form a joint representation of localization quality and classification, and use a vector to represent arbitrary distribution of box locations. The improved representations eliminate the inconsistency risk and accurately depict the flexible distribution in real data, but contain continuous labels, which is beyond the scope of Focal Loss. We then propose Generalized Focal Loss (GFL) that generalizes Focal Loss from its discrete form to the continuous version for successful optimization. On COCO test-dev, GFL achieves 45.0% AP using ResNet-101 backbone, surpassing state-of-the-art SAPD (43.5%) and ATSS (43.6%) with higher or comparable inference speed, under the same backbone and training settings. 
Notably, our best model can achieve a single-model single-scale AP of 48.2%, at 10 FPS on a single 2080Ti GPU.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143887865-44dc384d-ba0d-44e8-b3d7-d5fa837838cf.png\"/>\n</div>\n\n## Results and Models\n\n|     Backbone      |  Style  | Lr schd | Multi-scale Training | Inf time (fps) | box AP |                          Config                          |                                                                                                                                                                                   Download                                                                                                                                                                                   |\n| :---------------: | :-----: | :-----: | :------------------: | :------------: | :----: | :------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|       R-50        | pytorch |   1x    |          No          |      19.5      |  40.2  |            [config](./gfl_r50_fpn_1x_coco.py)            |                                                       [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json)                                                       |\n|       R-50        | pytorch |   2x    |         Yes          |      19.5      |  42.9  |          [config](./gfl_r50_fpn_ms-2x_coco.py)           |                                       [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json)                                       |\n|       R-101       | pytorch |   2x    |         Yes          |      14.7      |  44.7  |          [config](./gfl_r101_fpn_ms-2x_coco.py)          |                                     [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json)                                     |\n|    R-101-dcnv2    | pytorch |   2x    |         Yes          |      12.9      |  47.1  |    [config](./gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py)    |             [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json)             |\n|    X-101-32x4d    | pytorch |   2x    |         Yes          |      12.1      |  45.9  |       [config](./gfl_x101-32x4d_fpn_ms-2x_coco.py)       |       
                  [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json)                         |\n| X-101-32x4d-dcnv2 | pytorch |   2x    |         Yes          |      10.7      |  48.1  | [config](./gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) |\n\n\\[1\\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \\\n\\[2\\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \\\n\\[3\\] *`dcnv2` denotes deformable convolutional networks v2.* \\\n\\[4\\] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.*\n\n## Citation\n\nWe provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388)\n\n```latex\n@article{li2020generalized,\n  title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection},\n  author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian},\n  journal={arXiv preprint arXiv:2006.04388},\n  year={2020}\n}\n```\n"
  },
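The GFL README describes merging localization quality into the classification score and generalizing focal loss from discrete to continuous labels. A minimal single-class sketch of that Quality Focal Loss term, following the formula in the paper; `quality_focal_loss` is an illustrative helper, whereas the configs use the registered `QualityFocalLoss`, which also handles the multi-class case, per-sample weighting and reduction.

```python
import torch
import torch.nn.functional as F


def quality_focal_loss(pred_logits: torch.Tensor,
                       target_quality: torch.Tensor,
                       beta: float = 2.0) -> torch.Tensor:
    """Quality Focal Loss for one class column (illustrative only).

    pred_logits: raw scores before sigmoid; target_quality: continuous
    IoU-quality labels in [0, 1]. The paper's form is
        QFL(sigma) = -|y - sigma|^beta * [(1 - y) log(1 - sigma) + y log(sigma)],
    which reduces to the usual focal-loss modulation when y = 0 (negatives).
    """
    sigma = pred_logits.sigmoid()
    bce = F.binary_cross_entropy_with_logits(
        pred_logits, target_quality, reduction='none')
    modulating = (target_quality - sigma).abs().pow(beta)
    return (modulating * bce).sum()
```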
  {
    "path": "configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py",
    "content": "_base_ = './gfl_r50_fpn_ms-2x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNet',\n        depth=101,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),\n        stage_with_dcn=(False, True, True, True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/gfl/gfl_r101_fpn_ms-2x_coco.py",
    "content": "_base_ = './gfl_r50_fpn_ms-2x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNet',\n        depth=101,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/gfl/gfl_r50_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nmodel = dict(\n    type='GFL',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='GFLHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            beta=2.0,\n            loss_weight=1.0),\n        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n        reg_max=16,\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),\n    # training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
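`gfl_r50_fpn_1x_coco.py` above pairs `QualityFocalLoss` with `loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25)` and `reg_max=16`, i.e. each box edge is predicted as a discrete distribution over `reg_max + 1` bins. A simplified sketch of what that loss does, assuming targets lie strictly inside `[0, reg_max)`; the registered `DistributionFocalLoss` additionally handles weighting and reduction.

```python
import torch
import torch.nn.functional as F


def distribution_focal_loss(pred_logits: torch.Tensor,
                            target: torch.Tensor) -> torch.Tensor:
    """Distribution Focal Loss sketch (illustrative only).

    pred_logits: shape (N, reg_max + 1), one discrete distribution per
    regression target; target: continuous distance labels in [0, reg_max).
    DFL pushes probability mass onto the two integer bins bracketing each
    label, weighted by how close the label is to each bin.
    """
    left = target.long()              # lower bin index y_i
    right = left + 1                  # upper bin index y_{i+1}
    w_left = right.float() - target   # weight toward the lower bin
    w_right = target - left.float()   # weight toward the upper bin
    loss = F.cross_entropy(pred_logits, left, reduction='none') * w_left \
        + F.cross_entropy(pred_logits, right, reduction='none') * w_right
    return loss.mean()
```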
  {
    "path": "configs/gfl/gfl_r50_fpn_ms-2x_coco.py",
    "content": "_base_ = './gfl_r50_fpn_1x_coco.py'\nmax_epochs = 24\n\n# learning policy\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# multi-scale training\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomResize', scale=[(1333, 480), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n"
  },
  {
    "path": "configs/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py",
    "content": "_base_ = './gfl_r50_fpn_ms-2x_coco.py'\nmodel = dict(\n    type='GFL',\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=32,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),\n        stage_with_dcn=(False, False, True, True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))\n"
  },
  {
    "path": "configs/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py",
    "content": "_base_ = './gfl_r50_fpn_ms-2x_coco.py'\nmodel = dict(\n    type='GFL',\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=32,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))\n"
  },
  {
    "path": "configs/gfl/metafile.yml",
    "content": "Collections:\n  - Name: Generalized Focal Loss\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x V100 GPUs\n      Architecture:\n        - Generalized Focal Loss\n        - FPN\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/2006.04388\n      Title: 'Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection'\n    README: configs/gfl/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/gfl.py#L6\n      Version: v2.2.0\n\nModels:\n  - Name: gfl_r50_fpn_1x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_r50_fpn_1x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 51.28\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 40.2\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth\n\n  - Name: gfl_r50_fpn_ms-2x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_r50_fpn_ms-2x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 51.28\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 42.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth\n\n  - Name: gfl_r101_fpn_ms-2x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_r101_fpn_ms-2x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 68.03\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 44.7\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth\n\n  - Name: gfl_r101-dconv-c3-c5_fpn_ms-2x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 77.52\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 47.1\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth\n\n  - Name: gfl_x101-32x4d_fpn_ms-2x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 82.64\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - 
Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 45.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth\n\n  - Name: gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco\n    In Collection: Generalized Focal Loss\n    Config: configs/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py\n    Metadata:\n      inference time (ms/im):\n        - value: 93.46\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 48.1\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth\n"
  },
  {
    "path": "configs/ld/README.md",
    "content": "# LD\n\n> [Localization Distillation for Dense Object Detection](https://arxiv.org/abs/2102.12252)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nKnowledge distillation (KD) has witnessed its powerful capability in learning compact models in object detection. Previous KD methods for object detection mostly focus on imitating deep features within the imitation regions instead of mimicking classification logits due to its inefficiency in distilling localization information. In this paper, by reformulating the knowledge distillation process on localization, we present a novel localization distillation (LD) method which can efficiently transfer the localization knowledge from the teacher to the student. Moreover, we also heuristically introduce the concept of valuable localization region that can aid to selectively distill the semantic and localization knowledge for a certain region. Combining these two new components, for the first time, we show that logit mimicking can outperform feature imitation and localization knowledge distillation is more important and efficient than semantic knowledge for distilling object detectors. Our distillation scheme is simple as well as effective and can be easily applied to different dense object detectors. Experiments show that our LD can boost the AP score of GFocal-ResNet-50 with a single-scale 1× training schedule from 40.1 to 42.1 on the COCO benchmark without any sacrifice on the inference speed.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143966265-48a03668-8585-4525-8a86-afa2209d1602.png\"/>\n</div>\n\n## Results and Models\n\n### GFocalV1 with LD\n\n|  Teacher  | Student | Training schedule | Mini-batch size | AP (val) |                      Config                       |                                                                                                                                                        Download                                                                                                                                                        |\n| :-------: | :-----: | :---------------: | :-------------: | :------: | :-----------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|    --     |  R-18   |        1x         |        6        |   35.8   |                                                   |                                                                                                                                                                                                                                                                                                                        |\n|   R-101   |  R-18   |        1x         |        6        |   36.5   |   [config](./ld_r18-gflv1-r101_fpn_1x_coco.py)    |         [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206.log.json)         |\n|    --     |  R-34   |        1x         |        6        |   38.9   |                                                   |      
                                                                                                                                                                                                                                                                                                                  |\n|   R-101   |  R-34   |        1x         |        6        |   39.9   |   [config](./ld_r34-gflv1-r101_fpn_1x_coco.py)    |         [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007.log.json)         |\n|    --     |  R-50   |        1x         |        6        |   40.1   |                                                   |                                                                                                                                                                                                                                                                                                                        |\n|   R-101   |  R-50   |        1x         |        6        |   41.0   |   [config](./ld_r50-gflv1-r101_fpn_1x_coco.py)    |         [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355.log.json)         |\n|    --     |  R-101  |        2x         |        6        |   44.6   |                                                   |                                                                                                                                                                                                                                                                                                                        |\n| R-101-DCN |  R-101  |        2x         |        6        |   45.5   | [config](./ld_r101-gflv1-r101-dcn_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920.log.json) |\n\n## Note\n\n- Meaning of Config name: ld_r18(student model)\\_gflv1(based on gflv1)\\_r101(teacher model)\\_fpn(neck)\\_coco(dataset)\\_1x(12 epoch).py\n\n## Citation\n\n```latex\n@Inproceedings{zheng2022LD,\n  title={Localization Distillation for Dense Object Detection},\n  author= {Zheng, Zhaohui and Ye, Rongguang and Wang, Ping and Ren, Dongwei and Zuo, Wangmeng and Hou, Qibin and Cheng, Mingming},\n  booktitle={CVPR},\n  year={2022}\n}\n```\n"
  },
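The LD README explains that localization knowledge is transferred by distilling the teacher's box distributions into the student, and the config below (`ld_r18-gflv1-r101_fpn_1x_coco.py`) wires this up as `loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10)`. A minimal sketch of a temperature-scaled KL term of that kind; the exact weighting and reduction in the registered loss may differ.

```python
import torch.nn.functional as F


def kd_kl_div(student_logits, teacher_logits, T: float = 10.0):
    """Temperature-scaled KL divergence for localization distillation.

    Both inputs have shape (num_positions * 4, reg_max + 1): one discrete
    distribution per box edge (GFL's distributed bbox representation).
    The teacher distribution is softened with temperature T, the student is
    trained to match it, and the T**2 factor keeps gradient magnitudes
    comparable across temperatures. Illustrative sketch only.
    """
    p_teacher = F.softmax(teacher_logits / T, dim=-1)
    log_p_student = F.log_softmax(student_logits / T, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (T ** 2)
```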
  {
    "path": "configs/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py",
    "content": "_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth'  # noqa\nmodel = dict(\n    teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=101,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5))\n\nmax_epochs = 24\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# multi-scale training\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomResize', scale=[(1333, 480), (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n"
  },
  {
    "path": "configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\nteacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth'  # noqa\nmodel = dict(\n    type='KnowledgeDistillationSingleStageDetector',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py',\n    teacher_ckpt=teacher_ckpt,\n    backbone=dict(\n        type='ResNet',\n        depth=18,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(\n        type='FPN',\n        in_channels=[64, 128, 256, 512],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5),\n    bbox_head=dict(\n        type='LDHead',\n        num_classes=80,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            beta=2.0,\n            loss_weight=1.0),\n        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n        loss_ld=dict(\n            type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),\n        reg_max=16,\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),\n    # training and testing settings\n    train_cfg=dict(\n        assigner=dict(type='ATSSAssigner', topk=9),\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(type='nms', iou_threshold=0.6),\n        max_per_img=100))\n\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/ld/ld_r34-gflv1-r101_fpn_1x_coco.py",
    "content": "_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']\nmodel = dict(\n    backbone=dict(\n        type='ResNet',\n        depth=34,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')),\n    neck=dict(\n        type='FPN',\n        in_channels=[64, 128, 256, 512],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5))\n"
  },
  {
    "path": "configs/ld/ld_r50-gflv1-r101_fpn_1x_coco.py",
    "content": "_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']\nmodel = dict(\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=1,\n        add_extra_convs='on_output',\n        num_outs=5))\n"
  },
  {
    "path": "configs/ld/metafile.yml",
    "content": "Collections:\n  - Name: Localization Distillation\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - Localization Distillation\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x V100 GPUs\n      Architecture:\n        - FPN\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/2102.12252\n      Title: 'Localization Distillation for Dense Object Detection'\n    README: configs/ld/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.11.0/mmdet/models/dense_heads/ld_head.py#L11\n      Version: v2.11.0\n\nModels:\n  - Name: ld_r18-gflv1-r101_fpn_1x_coco\n    In Collection: Localization Distillation\n    Config: configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 1.8\n      Epochs: 12\n    Results:\n    - Task: Object Detection\n      Dataset: COCO\n      Metrics:\n        box AP: 36.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth\n  - Name: ld_r34-gflv1-r101_fpn_1x_coco\n    In Collection: Localization Distillation\n    Config: configs/ld/ld_r34-gflv1-r101_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 2.2\n      Epochs: 12\n    Results:\n    - Task: Object Detection\n      Dataset: COCO\n      Metrics:\n        box AP: 39.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth\n  - Name: ld_r50-gflv1-r101_fpn_1x_coco\n    In Collection: Localization Distillation\n    Config: configs/ld/ld_r50-gflv1-r101_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.6\n      Epochs: 12\n    Results:\n    - Task: Object Detection\n      Dataset: COCO\n      Metrics:\n        box AP: 41.0\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth\n  - Name: ld_r101-gflv1-r101-dcn_fpn_2x_coco\n    In Collection: Localization Distillation\n    Config: configs/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py\n    Metadata:\n      Training Memory (GB): 5.5\n      Epochs: 24\n    Results:\n    - Task: Object Detection\n      Dataset: COCO\n      Metrics:\n        box AP: 45.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth\n"
  },
  {
    "path": "configs/retinanet/README.md",
    "content": "# RetinaNet\n\n> [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002)\n\n<!-- [ALGORITHM] -->\n\n## Abstract\n\nThe highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/40661020/143973551-2b8e766a-1677-4f6d-953d-2e6d2a3c67b5.png\" height=\"300\"/>\n</div>\n\n## Results and Models\n\n|    Backbone     |  Style  |   Lr schd    | Mem (GB) | Inf time (fps) | box AP |                     Config                      |                                                                                                                                                         Download                                                                                                                                                          |\n| :-------------: | :-----: | :----------: | :------: | :------------: | :----: | :---------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|    R-18-FPN     | pytorch |      1x      |   1.7    |                |  31.7  |    [config](./retinanet_r18_fpn_1x_coco.py)     |           [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055.log.json)            |\n|    R-18-FPN     | pytorch | 1x(1 x 8 BS) |   5.0    |                |  31.7  |  [config](./retinanet_r18_fpn_1xb8-1x_coco.py)  |   [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255.log.json)    |\n|    R-50-FPN     |  caffe  |      1x      |   3.5    |      18.6      |  36.3  | [config](./retinanet_r50-caffe_fpn_1x_coco.py)  |   
[model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531_012518.log.json)   |\n|    R-50-FPN     | pytorch |      1x      |   3.8    |      19.0      |  36.5  |    [config](./retinanet_r50_fpn_1x_coco.py)     |               [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json)               |\n| R-50-FPN (FP16) | pytorch |      1x      |   2.8    |      31.6      |  36.4  |  [config](./retinanet_r50_fpn_amp-1x_coco.py)   |          [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json)          |\n|    R-50-FPN     | pytorch |      2x      |    -     |       -        |  37.4  |    [config](./retinanet_r50_fpn_2x_coco.py)     |               [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131_114738.log.json)               |\n|    R-101-FPN    |  caffe  |      1x      |   5.5    |      14.7      |  38.5  | [config](./retinanet_r101-caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531_012536.log.json) |\n|    R-101-FPN    | pytorch |      1x      |   5.7    |      15.0      |  38.5  |    [config](./retinanet_r101_fpn_1x_coco.py)    |             [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130_003055.log.json)             |\n|    R-101-FPN    | pytorch |      2x      |    -     |       -        |  38.9  |    [config](./retinanet_r101_fpn_2x_coco.py)    |             [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131_114859.log.json)             |\n| X-101-32x4d-FPN | pytorch |      1x      |   7.0    |      12.1      |  39.9  | [config](./retinanet_x101-32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130_003004.log.json) |\n| X-101-32x4d-FPN | pytorch |      2x      |    -     |       -        |  40.1  | 
[config](./retinanet_x101-32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131_114812.log.json) |\n| X-101-64x4d-FPN | pytorch |      1x      |   10.0   |      8.7       |  41.0  | [config](./retinanet_x101-64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130_003008.log.json) |\n| X-101-64x4d-FPN | pytorch |      2x      |    -     |       -        |  40.8  | [config](./retinanet_x101-64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131_114833.log.json) |\n\n## Pre-trained Models\n\nWe also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks.\n\n|    Backbone     |  Style  | Lr schd | Mem (GB) | box AP |                           Config                           |                                                                                                                                                                                 Download                                                                                                                                                                                  |\n| :-------------: | :-----: | :-----: | :------: | :----: | :--------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|    R-50-FPN     | pytorch |   3x    |   3.5    |  39.5  |    [config](./retinanet_r50_fpn_ms-640-800-3x_coco.py)     |               [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.log.json)               |\n|    R-101-FPN    |  caffe  |   3x    |   5.4    |  40.7  |     [config](./retinanet_r101-caffe_fpn_ms-3x_coco.py)     | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.log.json) |\n|    R-101-FPN    | pytorch |   3x    |   5.4    |   41   |    [config](./retinanet_r101_fpn_ms-640-800-3x_coco.py)    |             
[model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.log.json)             |\n| X-101-64x4d-FPN | pytorch |   3x    |   9.8    |  41.6  | [config](./retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth) \\| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.log.json) |\n\n## Citation\n\n```latex\n@inproceedings{lin2017focal,\n  title={Focal loss for dense object detection},\n  author={Lin, Tsung-Yi and Goyal, Priya and Girshick, Ross and He, Kaiming and Doll{\\'a}r, Piotr},\n  booktitle={Proceedings of the IEEE international conference on computer vision},\n  year={2017}\n}\n```\n"
  },
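The abstract above describes RetinaNet's key mechanism: reshaping the cross-entropy loss so that well-classified examples are down-weighted and training focuses on hard examples. As a rough illustration only (not the `FocalLoss` module shipped with mmdet), the sketch below implements the binary focal loss from the paper; `gamma=2.0` and `alpha=0.25` are the defaults reported in the paper and are assumptions here, not values read from these configs.

```python
# Illustrative binary focal loss, assuming raw logits and float 0/1 targets.
import torch
import torch.nn.functional as F


def focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # Down-weight easy examples by the modulating factor (1 - p_t) ** gamma.
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)              # prob. of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)  # class-balancing weight
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()


# An easy example (p_t close to 1) contributes almost nothing, while a hard
# example (p_t close to 0) keeps nearly its full cross-entropy weight.
```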
  {
    "path": "configs/retinanet/metafile.yml",
    "content": "Collections:\n  - Name: RetinaNet\n    Metadata:\n      Training Data: COCO\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n      Training Resources: 8x V100 GPUs\n      Architecture:\n        - Focal Loss\n        - FPN\n        - ResNet\n    Paper:\n      URL: https://arxiv.org/abs/1708.02002\n      Title: \"Focal Loss for Dense Object Detection\"\n    README: configs/retinanet/README.md\n    Code:\n      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6\n      Version: v2.0.0\n\nModels:\n  - Name: retinanet_r18_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r18_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 1.7\n      Training Resources: 8x V100 GPUs\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 31.7\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth\n\n  - Name: retinanet_r18_fpn_1xb8-1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py\n    Metadata:\n      Training Memory (GB): 5.0\n      Training Resources:  1x V100 GPUs\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 31.7\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth\n\n  - Name: retinanet_r50-caffe_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.5\n      inference time (ms/im):\n        - value: 53.76\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 36.3\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth\n\n  - Name: retinanet_r50_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 3.8\n      inference time (ms/im):\n        - value: 52.63\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 36.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth\n\n  - Name: retinanet_r50_fpn_amp-1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r50_fpn_amp-1x_coco.py\n    Metadata:\n      Training Memory (GB): 2.8\n      Training Techniques:\n        - SGD with Momentum\n        - Weight Decay\n        - Mixed Precision Training\n      inference time (ms/im):\n        - value: 31.65\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP16\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 36.4\n    Weights: 
https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth\n\n  - Name: retinanet_r50_fpn_2x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py\n    Metadata:\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 37.4\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth\n\n  - Name: retinanet_r50_fpn_ms-640-800-3x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py\n    Metadata:\n      Epochs: 36\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 39.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth\n\n  - Name: retinanet_r101-caffe_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r101-caffe_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 5.5\n      inference time (ms/im):\n        - value: 68.03\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 38.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth\n\n  - Name: retinanet_r101-caffe_fpn_ms-3x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py\n    Metadata:\n      Epochs: 36\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 40.7\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth\n\n  - Name: retinanet_r101_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r101_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 5.7\n      inference time (ms/im):\n        - value: 66.67\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 38.5\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth\n\n  - Name: retinanet_r101_fpn_2x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py\n    Metadata:\n      Training Memory (GB): 5.7\n      inference time (ms/im):\n        - value: 66.67\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 38.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth\n\n  - Name: retinanet_r101_fpn_ms-640-800-3x_coco\n    In Collection: RetinaNet\n    Config: 
configs/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py\n    Metadata:\n      Epochs: 36\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 41\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth\n\n  - Name: retinanet_x101-32x4d_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 7.0\n      inference time (ms/im):\n        - value: 82.64\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 39.9\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth\n\n  - Name: retinanet_x101-32x4d_fpn_2x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py\n    Metadata:\n      Training Memory (GB): 7.0\n      inference time (ms/im):\n        - value: 82.64\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 40.1\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth\n\n  - Name: retinanet_x101-64x4d_fpn_1x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py\n    Metadata:\n      Training Memory (GB): 10.0\n      inference time (ms/im):\n        - value: 114.94\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 12\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 41.0\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth\n\n  - Name: retinanet_x101-64x4d_fpn_2x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py\n    Metadata:\n      Training Memory (GB): 10.0\n      inference time (ms/im):\n        - value: 114.94\n          hardware: V100\n          backend: PyTorch\n          batch size: 1\n          mode: FP32\n          resolution: (800, 1333)\n      Epochs: 24\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 40.8\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth\n\n  - Name: retinanet_x101-64x4d_fpn_ms-640-800-3x_coco\n    In Collection: RetinaNet\n    Config: configs/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py\n    Metadata:\n      Epochs: 36\n    Results:\n      - Task: Object Detection\n        Dataset: COCO\n        Metrics:\n          box AP: 41.6\n    Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth\n"
  },
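For readers cross-checking the two files: the `inference time (ms/im)` entries in this metafile are simply the reciprocal of the `Inf time (fps)` column in the README above, for example:

```python
# retinanet_r50-caffe_fpn_1x_coco is listed at 18.6 fps in the README.
print(1000 / 18.6)  # ≈ 53.76 ms per image, the value recorded in the metafile
```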
  {
    "path": "configs/retinanet/retinanet_r101-caffe_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet101_caffe')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py",
    "content": "_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py'\n# learning policy\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet101_caffe')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r101_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r101_fpn_2x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_2x_coco.py'\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r101_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py",
    "content": "_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']\n# optimizer\nmodel = dict(\n    backbone=dict(\n        depth=101,\n        init_cfg=dict(type='Pretrained',\n                      checkpoint='torchvision://resnet101')))\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r18_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/models/retinanet_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\n# model\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n\n# TODO: support auto scaling lr\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (2 samples per GPU)\n# auto_scale_lr = dict(base_batch_size=16)\n"
  },
  {
    "path": "configs/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py",
    "content": "_base_ = [\n    '../_base_/models/retinanet_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\n# data\ntrain_dataloader = dict(batch_size=8)\n\n# model\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\n\n# Note: If the learning rate is set to 0.0025, the mAP will be 32.4.\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001))\n# TODO: support auto scaling lr\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (1 GPUs) x (8 samples per GPU)\n# auto_scale_lr = dict(base_batch_size=8)\n"
  },
  {
    "path": "configs/retinanet/retinanet_r18_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        depth=18,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),\n    neck=dict(in_channels=[64, 128, 256, 512]))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        # use caffe img_norm\n        mean=[103.530, 116.280, 123.675],\n        std=[1.0, 1.0, 1.0],\n        bgr_to_rgb=False,\n        pad_size_divisor=32),\n    backbone=dict(\n        norm_cfg=dict(requires_grad=False),\n        norm_eval=True,\n        style='caffe',\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint='open-mmlab://detectron2/resnet50_caffe')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50-caffe_fpn_ms-1x_coco.py",
    "content": "_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomResize',\n        scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),\n               (1333, 800)],\n        keep_ratio=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntrain_dataloader = dict(dataset=dict(pipeline=train_pipeline))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50-caffe_fpn_ms-2x_coco.py",
    "content": "_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'\n# training schedule for 2x\ntrain_cfg = dict(max_epochs=24)\n\n# learning rate policy\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=24,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50-caffe_fpn_ms-3x_coco.py",
    "content": "_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'\n\n# training schedule for 2x\ntrain_cfg = dict(max_epochs=36)\n\n# learning rate policy\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=36,\n        by_epoch=True,\n        milestones=[28, 34],\n        gamma=0.1)\n]\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50_fpn_1x_coco.py",
    "content": "_base_ = [\n    '../_base_/models/retinanet_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',\n    './retinanet_tta.py'\n]\n\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50_fpn_2x_coco.py",
    "content": "_base_ = [\n    '../_base_/models/retinanet_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'\n]\n\n# training schedule for 2x\ntrain_cfg = dict(max_epochs=24)\n\n# learning rate policy\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=24,\n        by_epoch=True,\n        milestones=[16, 22],\n        gamma=0.1)\n]\n\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py",
    "content": "_base_ = [\n    '../_base_/models/retinanet_r50_fpn.py',\n    '../common/lsj-200e_coco-detection.py'\n]\n\nimage_size = (1024, 1024)\nbatch_augments = [dict(type='BatchFixedSizePad', size=image_size)]\n\nmodel = dict(data_preprocessor=dict(batch_augments=batch_augments))\n\ntrain_dataloader = dict(batch_size=8, num_workers=4)\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    optimizer=dict(\n        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n"
  },
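The commented `auto_scale_lr` note in the config above refers to MMEngine's automatic learning-rate scaling. Assuming the usual linear scaling rule (scale the LR by the ratio of the actual total batch size to `base_batch_size`), and only when auto scaling is enabled at launch, the arithmetic works out as sketched below; the 4-GPU setup is a hypothetical example, not part of the config.

```python
# Hypothetical linear LR scaling for retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.
base_lr = 0.01 * 4           # lr set in the config (0.04)
base_batch_size = 64         # (8 GPUs) x (8 samples per GPU)
actual_batch_size = 4 * 8    # e.g. running on 4 GPUs with 8 samples each (assumption)
scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)             # 0.02
```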
  {
    "path": "configs/retinanet/retinanet_r50_fpn_90k_coco.py",
    "content": "_base_ = 'retinanet_r50_fpn_1x_coco.py'\n\n# training schedule for 90k\ntrain_cfg = dict(\n    _delete_=True,\n    type='IterBasedTrainLoop',\n    max_iters=90000,\n    val_interval=10000)\n# learning rate policy\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=90000,\n        by_epoch=False,\n        milestones=[60000, 80000],\n        gamma=0.1)\n]\ntrain_dataloader = dict(sampler=dict(type='InfiniteSampler'))\ndefault_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))\n\nlog_processor = dict(by_epoch=False)\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50_fpn_amp-1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\n# fp16 settings\nfp16 = dict(loss_scale=512.)\n"
  },
  {
    "path": "configs/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py",
    "content": "_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']\n# optimizer\noptim_wrapper = dict(\n    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))\n"
  },
  {
    "path": "configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        _delete_=True,\n        type='SwinTransformer',\n        embed_dims=96,\n        depths=[2, 2, 6, 2],\n        num_heads=[3, 6, 12, 24],\n        window_size=7,\n        mlp_ratio=4,\n        qkv_bias=True,\n        qk_scale=None,\n        drop_rate=0.0,\n        attn_drop_rate=0.0,\n        drop_path_rate=0.2,\n        patch_norm=True,\n        out_indices=(1, 2, 3),\n        with_cp=False,\n        convert_weights=True,\n        init_cfg=dict(\n            type='Pretrained',\n            checkpoint=\n            'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'\n        )),\n    neck=dict(\n        type='FPN',\n        in_channels=[192, 384, 768],\n        out_channels=256,\n        start_level=0,\n        add_extra_convs='on_input',\n        num_outs=5)\n    )"
  },
  {
    "path": "configs/retinanet/retinanet_tta.py",
    "content": "tta_model = dict(\n    type='DetTTAModel',\n    tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))\n\nimg_scales = [(1333, 800), (666, 400), (2000, 1200)]\ntta_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),\n    dict(\n        type='TestTimeAug',\n        transforms=[[\n            dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales\n        ], [\n            dict(type='RandomFlip', prob=1.),\n            dict(type='RandomFlip', prob=0.)\n        ], [dict(type='LoadAnnotations', with_bbox=True)],\n                    [\n                        dict(\n                            type='PackDetInputs',\n                            meta_keys=('img_id', 'img_path', 'ori_shape',\n                                       'img_shape', 'scale_factor', 'flip',\n                                       'flip_direction'))\n                    ]])\n]\n"
  },
  {
    "path": "configs/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=32,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_2x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=32,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=64,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py",
    "content": "_base_ = './retinanet_r50_fpn_2x_coco.py'\nmodel = dict(\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=64,\n        base_width=4,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        style='pytorch',\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))\n"
  },
  {
    "path": "configs/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py",
    "content": "_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']\n# optimizer\nmodel = dict(\n    backbone=dict(\n        type='ResNeXt',\n        depth=101,\n        groups=64,\n        base_width=4,\n        init_cfg=dict(\n            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))\noptim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))\n"
  },
  {
    "path": "demo/create_result_gif.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport mmcv\nimport numpy as np\nfrom mmengine.utils import scandir\n\ntry:\n    import imageio\nexcept ImportError:\n    imageio = None\n\n\n# TODO verify after refactoring analyze_results.py\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Create GIF for demo')\n    parser.add_argument(\n        'image_dir',\n        help='directory where result '\n        'images save path generated by ‘analyze_results.py’')\n    parser.add_argument(\n        '--out',\n        type=str,\n        default='result.gif',\n        help='gif path where will be saved')\n    args = parser.parse_args()\n    return args\n\n\ndef _generate_batch_data(sampler, batch_size):\n    batch = []\n    for idx in sampler:\n        batch.append(idx)\n        if len(batch) == batch_size:\n            yield batch\n            batch = []\n    if len(batch) > 0:\n        yield batch\n\n\ndef create_gif(frames, gif_name, duration=2):\n    \"\"\"Create gif through imageio.\n\n    Args:\n        frames (list[ndarray]): Image frames\n        gif_name (str): Saved gif name\n        duration (int): Display interval (s),\n            Default: 2\n    \"\"\"\n    if imageio is None:\n        raise RuntimeError('imageio is not installed,'\n                           'Please use “pip install imageio” to install')\n    imageio.mimsave(gif_name, frames, 'GIF', duration=duration)\n\n\ndef create_frame_by_matplotlib(image_dir,\n                               nrows=1,\n                               fig_size=(300, 300),\n                               font_size=15):\n    \"\"\"Create gif frame image through matplotlib.\n\n    Args:\n        image_dir (str): Root directory of result images\n        nrows (int): Number of rows displayed, Default: 1\n        fig_size (tuple): Figure size of the pyplot figure.\n           Default: (300, 300)\n        font_size (int): Font size of texts. 
Default: 15\n\n    Returns:\n        list[ndarray]: image frames\n    \"\"\"\n\n    result_dir_names = os.listdir(image_dir)\n    assert len(result_dir_names) == 2\n    # Longer length has higher priority\n    result_dir_names.reverse()\n\n    images_list = []\n    for dir_names in result_dir_names:\n        images_list.append(scandir(osp.join(image_dir, dir_names)))\n\n    frames = []\n    for paths in _generate_batch_data(zip(*images_list), nrows):\n\n        fig, axes = plt.subplots(nrows=nrows, ncols=2)\n        fig.suptitle('Good/bad case selected according '\n                     'to the COCO mAP of the single image')\n\n        det_patch = mpatches.Patch(color='salmon', label='prediction')\n        gt_patch = mpatches.Patch(color='royalblue', label='ground truth')\n        # bbox_to_anchor may need to be finetuned\n        plt.legend(\n            handles=[det_patch, gt_patch],\n            bbox_to_anchor=(1, -0.18),\n            loc='lower right',\n            borderaxespad=0.)\n\n        if nrows == 1:\n            axes = [axes]\n\n        dpi = fig.get_dpi()\n        # set fig size and margin\n        fig.set_size_inches(\n            (fig_size[0] * 2 + fig_size[0] // 20) / dpi,\n            (fig_size[1] * nrows + fig_size[1] // 3) / dpi,\n        )\n\n        fig.tight_layout()\n        # set subplot margin\n        plt.subplots_adjust(\n            hspace=.05,\n            wspace=0.05,\n            left=0.02,\n            right=0.98,\n            bottom=0.02,\n            top=0.98)\n\n        for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):\n            image_path_left = osp.join(\n                osp.join(image_dir, result_dir_names[0], path_tuple[0]))\n            image_path_right = osp.join(\n                osp.join(image_dir, result_dir_names[1], path_tuple[1]))\n            image_left = mmcv.imread(image_path_left)\n            image_left = mmcv.rgb2bgr(image_left)\n            image_right = mmcv.imread(image_path_right)\n            image_right = mmcv.rgb2bgr(image_right)\n\n            if i == 0:\n                ax_tuple[0].set_title(\n                    result_dir_names[0], fontdict={'size': font_size})\n                ax_tuple[1].set_title(\n                    result_dir_names[1], fontdict={'size': font_size})\n            ax_tuple[0].imshow(\n                image_left, extent=(0, *fig_size, 0), interpolation='bilinear')\n            ax_tuple[0].axis('off')\n            ax_tuple[1].imshow(\n                image_right,\n                extent=(0, *fig_size, 0),\n                interpolation='bilinear')\n            ax_tuple[1].axis('off')\n\n        canvas = fig.canvas\n        s, (width, height) = canvas.print_to_buffer()\n        buffer = np.frombuffer(s, dtype='uint8')\n        img_rgba = buffer.reshape(height, width, 4)\n        rgb, alpha = np.split(img_rgba, [3], axis=2)\n        img = rgb.astype('uint8')\n\n        frames.append(img)\n\n    return frames\n\n\ndef main():\n    args = parse_args()\n    frames = create_frame_by_matplotlib(args.image_dir)\n    create_gif(frames, args.out)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "demo/image_demo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Image Demo.\n\nThis script adopts a new infenence class, currently supports image path,\nnp.array and folder input formats, and will support video and webcam\nin the future.\n\nExample:\n    Save visualizations and predictions results::\n\n        python demo/image_demo.py demo/demo.jpg rtmdet-s\n\n        python demo/image_demo.py demo/demo.jpg \\\n        configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \\\n        --weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth\n\n    Visualize prediction results::\n\n        python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show\n\n        python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \\\n        --show\n\"\"\"\n\nfrom argparse import ArgumentParser\n\nfrom mmengine.logging import print_log\n\nfrom mmdet.apis import DetInferencer\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument(\n        'inputs', type=str, help='Input image file or folder path.')\n    parser.add_argument(\n        'model',\n        type=str,\n        help='Config or checkpoint .pth file or the model name '\n        'and alias defined in metafile. The model configuration '\n        'file will try to read from .pth if the parameter is '\n        'a .pth weights file.')\n    parser.add_argument('--weights', default=None, help='Checkpoint file')\n    parser.add_argument(\n        '--out-dir',\n        type=str,\n        default='outputs',\n        help='Output directory of images or prediction results.')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--pred-score-thr',\n        type=float,\n        default=0.3,\n        help='bbox score threshold')\n    parser.add_argument(\n        '--batch-size', type=int, default=1, help='Inference batch size.')\n    parser.add_argument(\n        '--show',\n        action='store_true',\n        help='Display the image in a popup window.')\n    parser.add_argument(\n        '--no-save-vis',\n        action='store_true',\n        help='Do not save detection vis results')\n    parser.add_argument(\n        '--no-save-pred',\n        action='store_true',\n        help='Do not save detection json results')\n    parser.add_argument(\n        '--print-result',\n        action='store_true',\n        help='Whether to print the results.')\n    parser.add_argument(\n        '--palette',\n        default='none',\n        choices=['coco', 'voc', 'citys', 'random', 'none'],\n        help='Color palette used for visualization')\n\n    call_args = vars(parser.parse_args())\n\n    if call_args['no_save_vis'] and call_args['no_save_pred']:\n        call_args['out_dir'] = ''\n\n    if call_args['model'].endswith('.pth'):\n        print_log('The model is a weight file, automatically '\n                  'assign the model to --weights')\n        call_args['weights'] = call_args['model']\n        call_args['model'] = None\n\n    init_kws = ['model', 'weights', 'device', 'palette']\n    init_args = {}\n    for init_kw in init_kws:\n        init_args[init_kw] = call_args.pop(init_kw)\n\n    return init_args, call_args\n\n\ndef main():\n    init_args, call_args = parse_args()\n    # TODO: Video and Webcam are currently not supported and\n    #  may consume too much memory if your input folder has a lot of images.\n    #  We will be optimized later.\n    inferencer = DetInferencer(**init_args)\n    inferencer(**call_args)\n\n    if call_args['out_dir'] != 
'' and not (call_args['no_save_vis']\n                                           and call_args['no_save_pred']):\n        print_log(f'results have been saved at {call_args[\"out_dir\"]}')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
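Since `demo/image_demo.py` is only a thin CLI wrapper around `DetInferencer`, the same call can be made directly from Python. The sketch below mirrors how the script splits its arguments into init kwargs (`model`, `weights`, `device`, `palette`) and call kwargs; the model alias and paths are the illustrative values from the script's docstring and defaults, not mandatory inputs.

```python
from mmdet.apis import DetInferencer

# init kwargs: model/weights/device/palette (see init_kws in parse_args above)
inferencer = DetInferencer(model='rtmdet-s', device='cuda:0', palette='none')

# call kwargs: inputs plus the remaining CLI options, using the script defaults
inferencer(inputs='demo/demo.jpg', out_dir='outputs', pred_score_thr=0.3)
```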
  {
    "path": "demo/inference_demo.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from mmdet.apis import init_detector, inference_detector\\n\",\n    \"from mmdet.utils import register_all_modules\\n\",\n    \"from mmdet.registry import VISUALIZERS\\n\",\n    \"import mmcv\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"processing rtmdet_tiny_8xb32-300e_coco...\\n\",\n      \"rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth exists in e:\\\\mmdetection\\\\demo\\\\checkpoints\\n\",\n      \"Successfully dumped rtmdet_tiny_8xb32-300e_coco.py to e:\\\\mmdetection\\\\demo\\\\checkpoints\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# download the checkpoint demo\\n\",\n    \"!mim download mmdet --config rtmdet_tiny_8xb32-300e_coco --dest ./checkpoints\\n\",\n    \"config_file = './checkpoints/rtmdet_tiny_8xb32-300e_coco.py'\\n\",\n    \"checkpoint_file = './checkpoints/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth'\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Loads checkpoint by local backend from path: ./checkpoints/rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth\\n\",\n      \"The model and loaded state dict do not match exactly\\n\",\n      \"\\n\",\n      \"unexpected key in source state_dict: data_preprocessor.mean, data_preprocessor.std\\n\",\n      \"\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"#Register all modules in mmdet into the registries\\n\",\n    \"register_all_modules()\\n\",\n    \"# build the model from a config file and a checkpoint file\\n\",\n    \"model = init_detector(config_file, checkpoint_file, device='cuda:0')  # or device='cpu'\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"d:\\\\anaconda3\\\\envs\\\\mmdet\\\\lib\\\\site-packages\\\\torch\\\\functional.py:445: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. 
(Triggered internally at  ..\\\\aten\\\\src\\\\ATen\\\\native\\\\TensorShape.cpp:2157.)\\n\",\n      \"  return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]\\n\"\n     ]\n    },\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"<DetDataSample(\\n\",\n      \"\\n\",\n      \"    META INFORMATION\\n\",\n      \"    img_id: 0\\n\",\n      \"    img_shape: (640, 640)\\n\",\n      \"    scale_factor: (1.0, 1.0)\\n\",\n      \"    batch_input_shape: (640, 640)\\n\",\n      \"    img_path: None\\n\",\n      \"    ori_shape: (427, 640)\\n\",\n      \"    pad_shape: (640, 640)\\n\",\n      \"\\n\",\n      \"    DATA FIELDS\\n\",\n      \"    gt_instances: <InstanceData(\\n\",\n      \"        \\n\",\n      \"            META INFORMATION\\n\",\n      \"        \\n\",\n      \"            DATA FIELDS\\n\",\n      \"        ) at 0x237adee4760>\\n\",\n      \"    ignored_instances: <InstanceData(\\n\",\n      \"        \\n\",\n      \"            META INFORMATION\\n\",\n      \"        \\n\",\n      \"            DATA FIELDS\\n\",\n      \"        ) at 0x237adee42b0>\\n\",\n      \"    pred_instances: <InstanceData(\\n\",\n      \"        \\n\",\n      \"            META INFORMATION\\n\",\n      \"        \\n\",\n      \"            DATA FIELDS\\n\",\n      \"            labels: tensor([13,  2,  2,  2,  2,  2,  2,  2, 13,  2,  2,  2, 56,  2,  2,  7,  2,  2,\\n\",\n      \"                         2,  2,  2,  2,  2,  2,  2,  2,  2,  7,  7,  2,  2, 16,  2,  2,  7,  2,\\n\",\n      \"                         2,  7, 56,  2,  7,  2,  7,  2,  2,  2,  2,  2,  2,  2,  2,  7,  2,  7,\\n\",\n      \"                         2,  2,  2,  2,  2,  2, 13,  0,  2,  7,  2, 56, 17,  7,  2,  2,  7,  2,\\n\",\n      \"                         2,  7, 13,  2,  7,  2,  7,  2,  2,  7,  2,  7,  2, 13,  2,  7,  2, 13,\\n\",\n      \"                        13,  2, 58,  2,  7,  2,  7,  2,  7,  2,  2,  2, 13,  2,  2,  2,  2,  2,\\n\",\n      \"                         2, 13,  2,  2,  7, 13,  2,  5,  2,  2, 28,  7,  2,  2,  2,  2,  7,  2,\\n\",\n      \"                         7,  7,  7,  2,  2,  2,  2,  2,  7, 16,  2, 13,  7,  2,  2,  1,  2,  2,\\n\",\n      \"                         2,  0, 25,  2,  7,  0,  2, 57,  2,  2,  7,  2,  2,  2,  2, 60,  7,  7,\\n\",\n      \"                        13,  7,  5,  2,  0,  7,  2,  0,  7, 56,  5,  7, 60, 13,  7,  0,  2,  7,\\n\",\n      \"                         2,  2,  2,  0,  2,  7,  7,  2,  0, 13,  2,  2, 13, 18,  2,  7, 18,  2,\\n\",\n      \"                         7, 13, 13,  2,  7,  0,  7, 13,  7,  2,  7,  0,  2, 13,  0,  7,  0,  2,\\n\",\n      \"                         7, 13,  7,  2,  2,  7,  5,  7, 13,  2, 13, 17, 13, 13, 13,  2,  2, 56,\\n\",\n      \"                         3,  0,  2,  2,  3, 20, 13,  2,  0, 10,  0, 18,  2,  2,  5, 59, 19,  2,\\n\",\n      \"                        13,  5, 13,  0,  2,  7, 13,  2,  0,  0,  5, 13, 13,  0,  5, 13,  7,  7,\\n\",\n      \"                        13,  7, 13,  7,  7,  2,  0,  7,  2,  2,  7,  3,  7,  7,  2, 13,  0,  0,\\n\",\n      \"                         2,  0,  2,  0,  0,  8,  7,  7,  2, 13,  0,  7], device='cuda:0')\\n\",\n      \"            scores: tensor([0.8862, 0.7469, 0.6893, 0.6781, 0.5375, 0.5372, 0.5285, 0.4982, 0.4508,\\n\",\n      \"                        0.4340, 0.4265, 0.4236, 0.4083, 0.3778, 0.3446, 0.3421, 0.3408, 0.3275,\\n\",\n      \"                        0.3193, 0.3192, 0.3101, 0.3064, 0.3008, 0.2683, 0.2595, 0.2438, 0.2287,\\n\",\n      \"  
                      0.2255, 0.2147, 0.2077, 0.2069, 0.2054, 0.2047, 0.1974, 0.1953, 0.1940,\\n\",\n      \"                        0.1933, 0.1909, 0.1877, 0.1811, 0.1797, 0.1751, 0.1722, 0.1718, 0.1697,\\n\",\n      \"                        0.1691, 0.1683, 0.1675, 0.1675, 0.1671, 0.1652, 0.1645, 0.1608, 0.1562,\\n\",\n      \"                        0.1555, 0.1520, 0.1504, 0.1457, 0.1440, 0.1413, 0.1402, 0.1390, 0.1386,\\n\",\n      \"                        0.1383, 0.1378, 0.1373, 0.1367, 0.1364, 0.1359, 0.1358, 0.1358, 0.1357,\\n\",\n      \"                        0.1354, 0.1354, 0.1302, 0.1295, 0.1259, 0.1249, 0.1248, 0.1223, 0.1213,\\n\",\n      \"                        0.1211, 0.1196, 0.1192, 0.1190, 0.1190, 0.1172, 0.1155, 0.1134, 0.1128,\\n\",\n      \"                        0.1127, 0.1120, 0.1110, 0.1110, 0.1109, 0.1089, 0.1072, 0.1068, 0.1066,\\n\",\n      \"                        0.1064, 0.1064, 0.1061, 0.1059, 0.1056, 0.1056, 0.1049, 0.1047, 0.1043,\\n\",\n      \"                        0.1035, 0.1024, 0.1019, 0.1013, 0.1006, 0.1005, 0.0997, 0.0992, 0.0976,\\n\",\n      \"                        0.0976, 0.0971, 0.0967, 0.0964, 0.0951, 0.0948, 0.0930, 0.0924, 0.0923,\\n\",\n      \"                        0.0921, 0.0920, 0.0919, 0.0918, 0.0917, 0.0915, 0.0912, 0.0911, 0.0907,\\n\",\n      \"                        0.0907, 0.0906, 0.0905, 0.0899, 0.0875, 0.0868, 0.0867, 0.0865, 0.0862,\\n\",\n      \"                        0.0854, 0.0851, 0.0851, 0.0850, 0.0845, 0.0828, 0.0821, 0.0819, 0.0819,\\n\",\n      \"                        0.0817, 0.0817, 0.0816, 0.0815, 0.0815, 0.0808, 0.0807, 0.0805, 0.0804,\\n\",\n      \"                        0.0794, 0.0791, 0.0788, 0.0786, 0.0785, 0.0782, 0.0773, 0.0763, 0.0757,\\n\",\n      \"                        0.0750, 0.0744, 0.0737, 0.0726, 0.0724, 0.0717, 0.0717, 0.0715, 0.0710,\\n\",\n      \"                        0.0709, 0.0706, 0.0704, 0.0701, 0.0701, 0.0700, 0.0695, 0.0693, 0.0693,\\n\",\n      \"                        0.0693, 0.0687, 0.0687, 0.0682, 0.0682, 0.0681, 0.0680, 0.0680, 0.0672,\\n\",\n      \"                        0.0671, 0.0670, 0.0667, 0.0666, 0.0666, 0.0665, 0.0664, 0.0663, 0.0663,\\n\",\n      \"                        0.0659, 0.0656, 0.0642, 0.0641, 0.0639, 0.0639, 0.0638, 0.0637, 0.0635,\\n\",\n      \"                        0.0634, 0.0632, 0.0629, 0.0629, 0.0628, 0.0628, 0.0624, 0.0621, 0.0619,\\n\",\n      \"                        0.0617, 0.0616, 0.0616, 0.0608, 0.0607, 0.0603, 0.0603, 0.0603, 0.0603,\\n\",\n      \"                        0.0603, 0.0603, 0.0602, 0.0602, 0.0601, 0.0600, 0.0600, 0.0600, 0.0597,\\n\",\n      \"                        0.0596, 0.0596, 0.0595, 0.0595, 0.0594, 0.0594, 0.0592, 0.0591, 0.0590,\\n\",\n      \"                        0.0587, 0.0585, 0.0581, 0.0581, 0.0579, 0.0578, 0.0578, 0.0577, 0.0577,\\n\",\n      \"                        0.0576, 0.0575, 0.0574, 0.0572, 0.0568, 0.0568, 0.0568, 0.0567, 0.0567,\\n\",\n      \"                        0.0566, 0.0565, 0.0564, 0.0562, 0.0562, 0.0561, 0.0559, 0.0557, 0.0556,\\n\",\n      \"                        0.0555, 0.0553, 0.0552, 0.0550, 0.0550, 0.0550, 0.0550, 0.0546, 0.0546,\\n\",\n      \"                        0.0543, 0.0540, 0.0538, 0.0536, 0.0535, 0.0534, 0.0531, 0.0528, 0.0526,\\n\",\n      \"                        0.0526, 0.0522, 0.0521], device='cuda:0')\\n\",\n      \"            bboxes: tensor([[220.5594, 176.5882, 456.2070, 383.3535],\\n\",\n      \"                        [295.4558, 117.2812, 378.7524, 
149.9818],\\n\",\n      \"                        [431.2495, 104.4259, 485.1029, 132.0473],\\n\",\n      \"                        ...,\\n\",\n      \"                        [532.6029, 109.6420, 548.2691, 142.0297],\\n\",\n      \"                        [355.7601, 114.7092, 382.2899, 147.6601],\\n\",\n      \"                        [550.9938,  93.6035, 615.3275, 127.6485]], device='cuda:0')\\n\",\n      \"        ) at 0x237adee47c0>\\n\",\n      \") at 0x237adee4970>\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# test a single image\\n\",\n    \"img = mmcv.imread( 'demo.jpg', channel_order='rgb')\\n\",\n    \"result = inference_detector(model, img)\\n\",\n    \"print(result)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"d:\\\\anaconda3\\\\envs\\\\mmdet\\\\lib\\\\site-packages\\\\mmengine\\\\visualization\\\\visualizer.py:163: UserWarning: `Visualizer` backend is not initialized because save_dir is None.\\n\",\n      \"  warnings.warn('`Visualizer` backend is not initialized '\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# init the visualizer(execute this block only once)\\n\",\n    \"visualizer = VISUALIZERS.build(model.cfg.visualizer)\\n\",\n    \"# the dataset_meta is loaded from the checkpoint and\\n\",\n    \"# then pass to the model in init_detector\\n\",\n    \"visualizer.dataset_meta = model.dataset_meta\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"image/png\": \"iVBORw0KGgoAAAANSUhEUgAAApQAAAG/CAYAAADmTEdUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9Z5BmyX3dDf4y8/rHl69q3+N7/GAMZgaeAA0IgATorUjKkaKkV1opSK1eSdRLiZQoyjLoRFKg6AmQoAFAEB4EMBgMgMG4HtveVXXZx1+fZj88Pa13v2GXitjYiDoRFR3RXdHdVXVv5snzP+ekcM6xj33sYx/72Mc+9rGPffx/C/n/6//APvaxj33sYx/72Mc+/v8b+4RyH/vYxz72sY997GMffy3sE8p97GMf+9jHPvaxj338tbBPKPexj33sYx/72Mc+9vHXwj6h3Mc+9rGPfexjH/vYx18L+4RyH/vYxz72sY997GMffy3sE8p97GMf+9jHPvaxj338tbBPKPexj33sYx/72Mc+9vHXwj6h3Mc+9rGPfexjH/vYx18L3tf6iYfubbu406EUjkJrhJZ4siAOQvJiiJU1dW2wTmCNoCgsURRgTA3CgZFI5aE1eCLED0OSKAJbkVVTEudTNlJMFuBrx21vXGO8NebsYzvcc/s9zB16DaefeZF+dh6BIfP7/ODfuIH3/do6Jx6a46Uzl8nzHjfd2CW/WnDqqSGv/aaIzprHaKTQ0TZnn4zpLXoY49Mf7NLtxWxfyZFRSC1KmkELiaXXq5kMKoRskqZT+hfhn/8//iXv/La7+fCHP8nG9h6fe+LjWAxSTQmIUVXJuLbUfsA4q5DCwzoQOHwMpVPo2tAlQhSG3lIXE+ygNTTbPkVR42mPnUsCPMfqDQFRo2K43mTrQkrjiEUmBq9Q+D3BxGhEFeACjRxaMgNx4tFIPKwtOHLoBGVVsb7dx487BHEXaofQFiEtpa3QxiGFQ9oaJUNqa0A6mo0um0+dQ4xTogjGsSNQMYEISG1JGQgOH0pY6B1lNClId/YobUWQjRB1SagEC4eXWB/0qVPBZFwhdnrs5QNWD64ymUw4dEMbuaJJ0xwna5yQRJlgd8ey2Ftk96UNZBAilxWtpYjN3W28skVnpUNV5NTFmHA+ZHd9islD4kAQtxzTVFBXjvnFitiXpGOFlB61KrHbFo8IO19jFBQDjdCgvRCnSoSLSFSJFRHpOY8jJyKSo3uk0wBtSxTQ6iSML3YZXJjiiXr2ckQeuqqRIqbXa1EWjuF4nSD0kCLAaIk2BUEQAA7nHNZanDPEUUSe5wTS0Y0UvnRUUrGXVWSVIfQ8fKlwaOIoIS8y2u02Vvt0O6so6XH2wissLqyh9ZQ0TSmLmk63xcrqAcbjKRtXz6PiiG/+um/ko5/+OFEs2NsbMT/fY6Hhce7SAJxPsyuJvC7ZxHLLbYd557vexSsvn+eP/ui3UTJEKIlQkiLL8aSiNpqiLPimd38L62fXefGlZ4kiD60tQiiktMQNAaKiKi1VEeB5AcYWxMkc73jHO/irv/pzNjd2+ZM//xQvPvcMP/GT/5RH73+Uf/CPfpDv/t4f593veBd70ws8+dxXueu2++jvTNnYOUV7LuLA8l1Mp2M8X7C8eCv96qvsXcxwWtHPSpYXGnQbIecvrDPOSlqJIgm7VDqjNAbwOHJoCVfFJPMR7/rm7+PXfv1XcWKbqrTY2mKNIo5j8kJSmyEq+F83i8VhSTVSiKDNVA9oBzAtPZaXD7C21uIrj79Eq9VA64qy8nAYbF3RaCZ4yqc0GdaVJEGLqnSz97LQSOVoNkKyIqfT6GCdYGdrl8XFJYywlGXJ8uoaFy5cQgiBws3WGd8
... [base64-encoded PNG image data for the notebook's rendered output omitted] ...
OY/jDl3rtuQcfbXLx4GafR4PCJAzzx6Mscv/NBHKfkicceIQgMnh2SJXld/5kHtNpTFMUOlS44dnw/2/1NnnnqMntnl3DtlO/80Hfxyc/8DdPzJzhz5gl6a2t197tj4XdtPBqMowIDTM9NMxiMcD3F9s4aD9z/KtZurLG2toZSNRMuLfIa2WHX3sR6QWbQZcm999yFkg7jaBXbTDMcxOzsXK/DBVJS5opWN2Aw7pHnFq12A6o6NCOk3O3+bjVDtjc3aDZDtDZIYWNIkVIyHCTs2X8A5JigM2ZrtSJJMhZmj7GxNmA0WqXd8SmKgrBhURaSIgddWhghGI17IAzdzhTRuF7pKVliSkWajTHC4LjUa0Yro9s5ThJvMRz0yIuKhX1dXvPaV9LfiHnkS19jasEn2sw4cc+B3TPxn/7kv+MVD7yD5x/5At/x/g/ys7/2M+zttvie7/phFvZUVEJw41qO54cYMcJyXYRpcPSOnGQcsXrFp8hhZl5w4k6fl84OWL9sY7TAcku0Knj/d36YV7/6Fdxx7Ag/8D0/wMp2H0yBMRVFUeHZDsNoOFnzuUx3p9i//yAXzj9DZ9oiTwXbm3UzVXuqzZ7lLpvrQy5fuUDg1w1ViBLbVUTjnKPHDjM9X5GOfIa9imvXrqFUTfJQNmg8kAlK1N9vCfiBZGtrB11KvLAGjjeCupdaCIHn+vWZkZeUOtrFw/h+3S510xt5c9ApC41tuyA0rquQigkn0uHAwUWycoMbl1KCoDmp4JMcOLjMiy+cZXl+ge1+j7w0hK0mh44f5Y1vfxMvnH2OQ4eP8OVHP8W73/yDXL94jo/+zu8xM9/Ga2i2t8a89z0f4vq1izz7+JNgFO3ODI6nsSoYjfq0ppcYD0ckZY5nSRpOi7wsEG49/I1GPUTlEEUxApfAs8hSU5eBaIfS7IBU2NLHVpI0iykKwcz8LONhn6qEoix3h7ebKp8QYtfvaPsW0jCBcwuKogQjsSynXtGqep3darXIspozaUw1gV9L8jKb8HjL3YvvzbKGJEn4evK+/pryvERY9XBhOw6mKHfX8LUK6lGVeuL51BNLC0RRxLETx9nZ2amh9VFEFEW7K3Bzs3rSrus3TWlqEUJLUj2g5b2C933LD/Pyyh/yxS98mtCdJUt3aiuFrEs1mo02w8GAPIk5cHAf4/GYNI7wA3eXN1mWJXLC3PQ8b1eVUxP25s3B2BjIsxJd1SzNPM8Rk4vWza/XdiwsOakI7XTx3IDBYIA2tee0Vkbrf28M2JbCUs7uxcDzvBo5l4M2Y4oc8izm4IFD2J5gayNlGPXoHggxUrG12UP5Ga+571U4skust5mab3Hj6gYzrQW21l9ifRgjbJ+G9BhxA9lzGA0rhsMhra5HJSRZAlMz06xcvVrb7MqSRqOB0dUuDkrZzgTflZLn5e5zoHVBu91k+0rv73egnL1HGMdyuf2uBoduafNbP3uRd73/CO/57ikuXxtziB/kk3/xpzz+led41T2v4P/+T+9j5cYqF89t8Ief/ARr6RZ+6OE7FfEGtKcLLl80TM9Kbr2lxRNf7rO4f4rlg7N86QsvsXzIJR5XjMeGxT0uoSMJp2YYjzTbV7bpdnzsJmxsx3zzN72aVKzwwhevUroFiVuy8aLLnuOGG+sp+QAWpj1aleHbP/BPGNltfvnX/x2usPA8aHcC4p5hZvkQWbLN4uGSq6s9Nk+leA40j89iZbOcuGWBzc01Hn3kNM2uhzAFUU/guYbK0yhAKpieaRK6dzCKthj0NhCqT3e6xdqNiAfue4ADe/bwh//t44SihZQViS4wpotgB6vy8BemOXzfHC8/0UP3E0zYo4w0uJqFwy5WMGLlHPjCZ3afYWeUkseKqTmPSCcUVQfl2YR+jS2gqnAaXVZH25SDhKUK4s0euu1gGhllWlEaqLQ9ObjAMDF8K4EuJVIYGs3632dpzRkFDbo2J1cmR9oWZaUBQxAIQt/B6IooLtCFhW3BwkK9Vr2+MsJrtAkDh2wcU+1IRknG0n7JvsPTLC+9gs31IS8++xjjLYUrS3oa/GYD4eckkWRprgluRHxulvd8+wP8ySf+CN+bxbFDLr10gUbbYXGvQlPS62ec2LufSCl2+n26foPVKwlHF1yaUvP8Wo7v2bheRW9zzIEDhyjKhKtXVihNibI80jSrPTFCsLa2UXtOfMHCUpPbT7yaL3zh81RVSbPZBqNwHI8777yTRx99lIWFBlGUkETQaM1y6OgcZ1+4ztKeBpevn2actZifXkKbDYpszHCnPpxrXEaObRcszc2imOXD3/sd/Pwv/hrdaYeqLPmT3/8bzl6+zEf+8fcRzvYIg2l+7t/8NqfOPMLv/s4n2B6s0PQa6FIyHA9odht0O3P1sCBdbNWit32lbs8os4mvK2AURZSlxnLqlhkhBEYkCFzmZ1v4rsfa6g5VIfAcnyQZ4bgWGAfHE2wPtpHSByqUNLsvKs+rVZIwDKnKfLeXt9IuStoYkVKZGixfiQJlCZIsxnN80jTGGIGtGgSeRxSNam5fS5LEdbmCkhZ6csALJGlR4jiKO269nWeefpp3f/M3IqXFk0+dBAbcuDJAqwxZ2TQCnzgveP93v4NDR+b5xB9+iZ31HUo9RqQlzXln90zsHjwGeho5SHjTGw9xeQU2Rzu88MJTzHSaXL50nv/47/4tZ09f5fzFU3S7B/jzT/8p+w83CMOC8UBy/XLCA6++l2F+CpMt09uJ2LqxTbMZUnoV7//ef8a+PW/mG996lPe/6/Wsr20jKAFDVdSrzn0H92KM4dKlqzQbXWbnuhw+tMCZM6fJU6v2yZJw9NgBDDYKi9UbN7CVJMsTqgmSpqwqdCm4/Y69NMI2zz99Ea0NZSHxQwetCzItQSRYEiwVogRkRR8joCptoqxiYW6WLB1Sljm6rMM3vh8SRQllmWOMoNPp0O122Vjf3F2/1U1aKYFfr0g9z0MqwXDYB6AoSvYsH6Awa4wGJZQhusq47a45hn1NvxcRq4RiaAikwQsM+w6/ksWDe7hy4wlOv3CDdrvNvuWDvHT2OcKGQ1qWFKaiNQeiOMCUN4VOI44cXqQ/yHj2uacQEw9eoTWqcpGqxNiKLMlxAFwf6Xk0VEklYDwe4vshoRcyGG5SJArb9pmeCbh6/RpB0CAaD7AsRVnKWh2kwndC0jzZbTq6qXoppTDUwRlT1aSTZtig1PnEs+dQ6JJms8me5YOcP38ex3JpNkM2N9cpdT6BpQcMJkPWrrdx0hxjjCEMQ/I8rzvF5dfXnNUkkV9VFZaQlGU9IDmOQyMIaxj2ZI2qVL1CT9OURitkdXV14s+sfZ1h6BNF2eR33gYjEdKQZQmYuuI2zQdIFRJFtUf4lmPHefnli7huThKXYNfYsqWlJbY2N8mTlNnZOiDZbjXqgpPRcLdT3oh6AE+SpFbmqVfelS45fPggaZpy48YqUtY4Ha0rqslArVRdDSmlRCqxGyJstzt1crwsJ5zK+vnVZtK/br5eQ+v7/qSFyGBZHoVOqZBIYyGNh+dbIEuanSY7gw1UUKDCJh1fc
PRuh34/5uWnY77/Rz7Cpz/zec69+DK33HYI3ynwWi2urqyydmnM8Vv3s7y0QJYVnHr2JXqbfXzPQxcljh2QZQV5lWJJNamWFCRRgrK/7tG9ubYHiTGasixotgIG1/+eFcrl1wjTDVxWtzJ+8j+9hr/61Ao3Xtzge//tLOdXxywGx+itNfjyJ7/E8X238Qu//jO4HGRn/TqXr67xQ//mIzSmLaJRRRbl7D1scEOHbsNC5pInPj+mPSfpj0pKbbHvqMOVSxGVVghLU2SgsPEbilQbCm2zd5+mKDIWl7vc+8Dr+PzvnEGr6xx7W8ypZ2B5rktv6//P2n9HW5bd953YZ++Tz7nx5VC5qrur0RENNBpANzIIEgQJBoEUFamRlmxJtrWsZI080ow0Hq8lc2RbsiVrFE3FoUxKIgmJJAQQOTfQOVRXd1eul9+78eSz9/Yf+95b4PxjeC2+te7qrqqX7kn7u7+/bzC89tqYT743YXPrFF/81phpXbIsB6S3ImRSE7Q7VCdHFG3DOx9N2FwuuH4ARsDwQHI3r3FHNgQ0bnuMs5qigrJikbunKoPvu3SXFOtnBa4bk443GR4fkI4VZlDR6TgUjc/WhcfodI546+s7VFPIVIqvXaKepqhjfBHR78LB6ADDErHb0OgKfyNl7aLDaLfDjRfGtFsNnfUQERWUuUuvJ8mqCoIOwl3DC0ua8QiZSeTKKqL2SNM9qnKKlBXdXshkUpMXatZ0IlEYlDY4josnjA2sLQEpiSIP6dZkU2G1c/M+Vu3jeBUN4Ie+1X/VNWsrbeqyoigqtEqo6inCgfZSl8ALme7n1JVkeX0N5WdkuxPS3YZkLWPtwQ7t1QuM9k8Y3hhSTypaS+c5Hg/IiglGQdVzaTsOHzvzKNf3p1y7/Sp+JMkzTSuU1GpKf3mNsmqoJxlFU7HSkwyHEHcijJ4iaRFHPe7c3WNleQ1jSlwnwvMcghDSCdy6vYfnC/xAWGOJ28FzY5QumGb7+IHhvksP8eabb9JptZlOU4q8YWlplY997GP823/9bzh9apmihr2dA9pJyKWLD7B5+hy//fn/yNZmj8OTHNf08P0az/NpJUsMhjuWJWgU2aTiRz/9DHd3Drl+7Q4PXn6En/3pP87Va2/jxkN++Zf/OZ4RLC8ZTo4dfuZn/zw7Bzf47rd+E5FHJH3F44++n73DI27evMngZEIQCvIiY7m7RJHVPProw7z51hWmU9tEM51mqNmD0T5crdhdEuE7Ie2uRDUVUlTUjaDV6jAeDzHao2pKtLC6zJVNh+uv5bi+T5yETKdjHnnkYa5du4bn2AqxaTqkqTzb7CRsj65mjG5ahKFL0Io4PDwkCB1cKWgaayp3JTiOhx/YbL2yqK3WSgYIbEWb57tsrG+ys7OHEA4f+uBHGEyOefW1F7h49hJ7ezdYXX+ARx96hMOjt/j857/Bu555is2tJZymwyc/8ST/x//2b2HqHCHv9do+/pEur7+yy//0t/8TL7zwTf67v/l/Zfuiz2Avor+cMR4UfOiZn+Tu3tscDg5QyqNSB4yOXAwlRgvbb+xWtJccyjxCFjVGGJR2abTh3Lkz3Hj7hH7vAY7S7+BoSeDbsRUKjo+P2djs44UBhwcDwpbk1OY5qrzg+GQH6WjQEVku8HyHtVNL7O/ewRUh6yvreK7m5s2bGO0hZG11x/4KfpAznY5ZWd6kKjyG0x3iqEeWZQRBgNYNvu+imoqyLq2juK7xwi3qumAyOWRlqcVkPKKuS5QB1/Fw3Jmr2IhZnIs9nnXTzOJyoChTlpaWSKcZKyu2mamqc86dOwVCMxoU5OWAMjO0Wyt8+BPn+NLnXyQJzzCc3EDXM5OIJ6m0wo08trY65OmUtPF47KF3cv3GGwzHeyhZIglZ6nvEYcTOtYLI7xIFkpPDKXWjmUwHnLtwP1U9YXh0jNcLKfIUr4loKCnrmlBGeE6DcgOUavBdnyRMLOMlBVk+pZX0SbMC3RiEbHBcQ1WBdARSaoySVE1NXTWL8PUwDEGYhS7PdqNb84ueZVkaLfDDCMdxaLVi9vYO8NwAz3VtlZ6x0UGe56E0VE2N697T9f1g97tC482ajOYaTmUMnU7HyhTKemFuKcuSVpws/mxNifbfk3a8GMenqU0SEIBBglC0kjZ5lgGKqlEIY9lD6djGoOmkIAx9ksS25gVBxGR8bF3YjjV/grGVnFojhWV0u602vaUeBwcHCyd7mme0Wi3rOq8q4sC6wvMspd/vznrSj4nC1sx579gN9WSyAN5WU8oChOtG47nBYjNksMePGfPaitsLYD5nOKuqAjQ4DdrY+8XFAlc/jIhaEdrkTBrB/Q8ITp1pMxxqEj/mxq0RWeXSOHv03PMMmoZe1+P81ip7R2P2Dq9w4dyDZBMXx8m59vpdJieaMHaoVYpqoNNdoqonNks5iKlrNRtrz5lYGyWllCKIwlk9qvVAjO9mv7+A8oFPCNNUHlI2TIcRf/zPXiKvRqyfjxilGUloKEqH2BtSpQmJ/hTHL7d55gOPMPV7/A+/9JfJzTWyEw/HaO5/zGfpQkgtB9z4nKZIXVrdhiuvQBglbJ7PwdUc33XpLTcMp1BlIToocAXUU0F/2yHseiDadJMYIQS3X7nOOx7ucDDQTIY1H/3FNb7+/9hHhA4yalMWBb21MdNxGzGRDPcNeTLm7FYMTsHTn1zjvZ+4j3Gu+K1fuUV5XLIWr7M7uMvh0ZhSS9KysbE6Chu34giUCiidkqByaG82hJFHuB/SeqhFeuySHhkm1+7g+gGsJVx+4gLnznQZH2maYpcXvnWV4lDQeCE+LVQ9xesEDA+mtBOX5WWB7EuizYib36pwmgmjRtFtdeitZwivIXPhTHKa63lJP/IoqxPMRBEmK2S9dZzpgJEakQ8HCA1rp5YYjE4oj8Dz3NnNYkEyswxHGzXi0tSNdfFKSZblSGl3a0pZRggcvKCy4deNZm3do5UYTAVZqigxSAVNLUAYkrjH6lqX8XSH4UChTUJ10uA1hqaEpZWIpftyJrImzX0i2vjpJnsnB6wt+QxOMjbv26QeHvK+Rzf47ot3eevGlKff83H2jwZcf/sbbLTWaa82HN2ZcDJteOC9LYY3FMnpCf1ej9svlIT9Fs0oY29P0O1FpNmYbKpwnchquEJBWda4sg2ipqqnnDlzhiLXDEdT0jS1D2dpiJIYISsm44pON2IyGfGH/+Cf4PP/5eu0e6uoZsjDD1/iNz/7OdZXV2hKF9cvKQpQTUXcCRjsDQk6PmVTU001cS/keGfM+555H+1OyJe/8rv0+23yrKJIQ4I4orea4aqAwYldALW2WYPQEMU+0nVo6tl97kiSJCGfaacEdvfuaGlrK01Jf9UuPuMTl6ywYzDdgHFclFRojAVEFSx3Wzz8WMyLL445c6ZDnsLJ0RjpeNSVpGoaVCNBpqhKcPmBRyiqijDxePPNq3bhTdrk44xGpQhp3ZRx1EVIj0JVdJfbnOzvozUW4DYGEIsFy/M8fNfDc0tcD3TtUBYCAzbHEAgcF2MUrh/QaS9z+84tlleXOH36
NPsHO6gqYXtzjYsXTvHq66/gt9vsHRwSRy267R4Ht15BCU02LRbPxHe+axtlQs5f+hF+7d/9cwJdsrK9xsc/8Ql+9Vd/lV7vFNfevMrSehtMRacXzBayiqqEsqhwHIeiLFlbXeX4eIDWPgKNJx1bj1rXrK4u27Bro2fRJTG4DY5oULVPVaY4rkKpkNW+Iu43HOx22FhfIQocpuOCxjh0V9vs7+xSFCXtpIXruvT7fU4Gh5RZSlFmuNIuoIGfEHghFy5d4u23rtNutzk8PrJ1uli9Y5pOiOOYsi6YTHPilo/UwSx2pZ4F/YcMBoN74Ah+j2avqdXCJKG1RrohgmrmzpUIqa2O25GcPn2aq6+/QRL3LesS1DSVzyPvPMPbbx7QmCl1rSlz+NGfPs1rL+1x/j7J26+1OTg4IAw63P/OFY4OU04ODGtrMePxkOFJSRBavePS0gpFNiHPKqKww2h8wvapNeJWm8ODESeTEW4jUXUDnqCuFY6pEcIlqyq6rZZNBZBQlz5ra2sMhzs4jk+RN+BZ6YjUEUKWeK5DEIScDE7wAx9hNGHQxkhFVmZEYYcoCtjd26XXXcLBMJ1mNvfRGKIwWRQMWCmBZTZtpE2+OO5zwGiUxHEbNAqBzY+cd14bLWfgol5ME6wr2m7yXM+G2QdBMKsk9JhORgDEsZWfSFy0Mba0Qt8bqc5NMXMHdJK07WZvxnzOHeHTabaISKqqiiiyZqtypvnz3BDHi6jKEa4UGB1SqwrcGq1dotAncD3SNEVgUKrG9b2ZaclHMmNbseeurmsC17lXgymcBYM7Zxjj2Lbv5EW2OMZzFhljI7CkcBeB+vMkC8exWb52BG4oCuuanwPMOWs5n9rEcYwrPeK2S6ZOCBLoL/cplWL/aEqnv8yPfexJnv/2LZ599lnOXexw68aI9nJAXpVcOP9OnnrPOb74hS+x3Ovz6os3WV5pkaYpvdUuB/sTqlRQFCme66C1RNWWwDBaoox9trmOJYiqQuAHlq3Mj364ppwfGlBe/JBrotgjzws2T3u8/8Nb9FYc0ibFj3q0lqd43hpZIegGIS/8huGL/5+7/Hd/88/zla98ns9+6XOcubzE0eSEsopQA5+kM6W9Ldh/U+A7NXHk4vqSspZM0xLHszsMoQ0ePie5hromDl0aPNotTWvZo5T2whnlgvJE8tgD8PM/8RD/5O9c4dN/I+TVb7T57i/fYPmiwVt1UUIwuuswvevTNCU/+6fXKI3hjdfvcOniA1x/q0RGKXeuVZhCsrUF7/7og/zHX/smpQbpgnQisrTAcw2+FzI+KHBiWytY+NBFkr4JqxdjmriNSg/QmUd/2aWUNV64ztbpLVJVkAcZj5/RvPD1A248l7HS67M/OObn/sjP8Pyzr3Pr6lVcH05dXiFlyMELin5HkGpBuw2d1iqVP0AnDfpA0Dq9yc1Xd4k6sLoi0P4qqfRp6ZhpekxbTinUCKEjwnbJzqGmUSClO2MNIpTOLbBsfNtsMcvAT1ON50obmSQdhLQ5aAhj9ZSuh+tJvKBidc1BoimnIbnKoPaQjkCbCq1sZubSUkxZ5qixpB4HDOsUP27TNhOMCFi5tIR07nIycbh9JSZNK9bPrLF9+jxxuyCSMafcFnfr17n7Vo/N7hJf+N3fRXiay5c7LJ0WPP/VipVzPucvtnjrBUPcytk6JRgfaurKakrGQ9v1PU2HBIFLUZQURUWv16bb9xmND0G3cUTLZjgaQ7e3jDGG8eQY03jUeowjAuoKWq0WYdjife9/nJdffpX+0jpX3niRf/HL/5pvf/WbfO53foud/QErGz3evnETnIrTp5cZ3RpxMipYXl9jND7CeA1bS+f5sR/7KX75X/5DwshFm5wkiSgLCEI7ypAiwPEqpnlK01hAm6U1TePbGKPU5seVTX0v4mP2YFNKgVa4IiaMBL1lqCtNNgmpSoMb5uSFi+drkDlVKTAqJIgaglZBXUl07qJNTRTFeK4gSZbZ2z3GDQV16TOtxyTSwXdccHwG6QlRWxA4EpVrSiUIAgdlajwh8ZyAItcEfozGILyGulZoYzWeeW4ffgtHrLESi3YnpKlqJuOCOOnYcZwqyacldQOdXkBR24XaiugNG+tnEabkeDhgMBqyffYcCA9XOuzevEnkuWRFivHtvTH/aHmS5e4ar13dYf10n//0m7/JZ3/r3/Lyi29x9fpXONoNKMoJjQLXE3TaXYQoKIqaPG2I4w5Zllnmp9YodW/RVVWNNpY9OnPmDNN8OuuHB+kElE2NJx2ksOySsHY6XDRRu4XSHr1lmB44GBrcaIRwu9y6cZs4DvGkhyPtiPEeQEyp6wlgNx1NrZlOM5aWlnnooYf4wue/yNJyF7Bd9o5jmRxlGnq9DkVdcfv6DusbazOXd4rv+5SlZTDnTVOu684ctz5SOEwmk0UjTBjGtiJSKcvcCWtQkRIalROGLRqV4Yk2WT5B1y73v2OTO7ePKOoRQkhQXX7ypz/A0fAqRwcpH/3AH+Gf/LO/y5lLHSZFzAMPbvP6a1dRxqfIHHrLmv7GgIPdEr2/Bc6YplGMh9YI02q1GI1SPE/YZpgyRQCNkKjG0A67TLMhcSeejccNTS144IH7SNOcnZ0dfN/mHyol8DxBEEQkrZCdnR08zyNKWkzGOa6wOmMpJY5vK17nOaHGKIQ2GGPX9iCI6PV6pGlKlmVsb29y587OTIMpFjrJOcgEqMsGIW22o+Nas5kxtW3pmbHGnu9ajaQfU1WzZqUyRwhn4Qj3PDtN0DOGM8vseHp+f4zH41mlpDXDTCaTxQjaxnrVC3A2Z73txoLFiLnf7xPHIVk2tVKIOKYcpaR6gidDjJEUdYpwXLRyaKGx6cwsNilyxio6jgWRniORwplFJYU0RqPrBnfGnBdFhevOJgDMW33cRc6l1nrBVtZ1je9ZJrgqm9/D9jmOQxRFeJ7HYDBYMMGNViTJPbPj3MhkjGF7e5vxeEpRZPSWe+BoJpMR3aUuTuBSNQ3HE03gCuoyQzImGwr6S21On2vTCi9gEoPvuOztHPDGK1eI45Dl1Zi4FXLz7h2oJY4MQYVUVYNSJQiDaqykwnMdwsiC3qps0Kai1Vrh5PbhDwUof2g/uHQVhorAdRkf1zz/VU0SrdPvreIGmpe/HfJb/26Xf/nfXOXv/Zm3eP7Vm3S7bd7/+AP8/J/8Wc5uuyh3TG9JIkYr1KMx62suu3caglBT1HDrtiboNMiwZFoYjkfaupCF4WhaolWN57rkssHoHKkcRpXBJ6YqBb4qWWoX+B2X8+9d4Ze++BRx0uX5b6b4p7s0XZ9B2ZAeQXVc0OuX/O1/8R7+3N/4BNOJ4eZtn6v7b9A9c8j4oOLiqZCl1QGf+iMXePXq2/gth/5SF+n4KFURJx7KQFkrPvbjj9Dr9ShH4OMQuponPyXY3PY5vVWSZiHDvED7OVErQApDcVLiDjKWp5Jvfa1g+4HTPPz4Fll5Ak3IV3/tK5zcPqJ7CoJ+iJeMaSYQhxpXONRFTa+1xd7dY1Tj4giJ7xnuvHZIcVSxknjkE8k4zzF
GYiJNEVSYpI/qx3htzejQIS0FyjhoYTVAjuOQtB0cCWhBFIEjBUJqen0XPwBj9CLQtm4McdgnigVeWNPUmmziMZ0qcMDxFQ6CTs/FSPt3XgRGeIyHikBGOMuCzoMBbq+FClLGrZATFfLG94eEZoPzp2LO3+cSSBvCerB/yHe/8Qpf/+bz/MxP/1l+5P1/nelI8/1nv4cjXFq9AOU13Lw7JVmSTIcZ3/3cIcNsShRF3L2ZczT1qIsJQRCRFyOyfIDWtR2d9Fq02m0GoxG1atjYXmJtY3k2cnIJgoiyyJlOxggzY1KER+CVhJ5me/U8505fsI7VbMTtG7s0Gv7IL/xpXvzuy9x5+waNSnnHIw/RVFN8CVdeOOL9H/gE/6v/zR9lODpkfXmJ1eU2Wms+9/nfIEkipkN7Tta3Eqo6RStFkadsbq0hhY/n+nS7fapCIrBC6+2tM1x+8AGEK/F9DykFWis8R9JUJZ4j8b3A7lBFxd7dkqaKiNsN0i0oc0XDlGk6hcbDFQ1rKx6TcYFwIs5c3CJJEtbXTnHx3P0IYRiODkGUlGVKlp/QjX3a6yHhSoBwHHrRKqFKaHIPKbvE3YSiMWxunqO3skVWKYyrmVYDlEwxRix0W0VRLhitMAxwpYOUgklaMR5WSNel3bHnp6oqkiThyafewzvf9Q7qxqUuQ4q8mTUeaZSZ8vCT7+bSgw/Q6bTZv3ubfiugHbsoXdDtxfyBP/xp3vX4OynGxeIlZchRuUd7wzLx5/v3c7gn+Q+/9jX+5l//+7zrvRtkU007WsUYw93bJ9y5NaJIXaYTq/nLMwsmpQPtTmwDruuaxtj8xk6nw+7+PqPhxNZnNqBJOXO+RxBZp6pjXKqiQJkJTmRomJJWRzz5nsepdUqepxSp4sZbt9la24bGIUtzyrJkmk6Ik4BGlYSRZ6vphGAymVBWOdLRZNmE73zn27Q7idVQlyVRFFOWFePpxHYjN5BlBb1+m7LMmUxGeJ6zCIOeM1TzMa2Nbclp6go/8Ehm0TIIGygdBBFGaTzHp6lZmES0bkiShCyf8MhjFwgil+OjMU1TWpmO5+FHGV/50vf51pf3ufFmzj/+Z/+YsOUxGYEoKz73q1/n1pVD6uk+qtrneKfixmsu5ahLXkyYTkrSSYMjIwLfZ5qOCCMHpSuEMpjGoc4THAxJFCEdxdkzZyiyjFbbslNRFFCVsHPXms7qpsL1FVEQkk5zNtZX+fAHPkFTgXBcWkmP3oqPkP4sC7KFNC5xHBEHIYEboOpq0bw11+UdHR1RVRVh6HNyckJZ5rOg7gatbRe1EFaO5fs+rseMQXMXTuR5x/rcUW5mG5mqahDMR7USrSzr1uv1CAKP8XhMlueLDYkQgizLaJpm1pIz3yDIBcALw3hxTShlGUK7+Q4XRkartbTxX8PhkCAIWF+z3dppU9kINxlSIHHcCEc7NAiGeOimQTcKjJiBQol0fYyQmMbQVPWCMTTGIPQ9V7sF6d6CIQUWrGNRFAvjUVVVZGmOarQNSQcaVSEdG7MzB92TyYTj42PbKz4DuXMpg/3eZqFbdGbGIc/z8NyILNWMDirII4Z3U8Z3J9TDEj/fJ9/fI9sdQ95C54JPfuoz/NQf/HlqT/LmtZfZObjJG2+8ThA7VKXDZKS5ce2ITtzBdUMCvwfCQTgKxysJ44owtiyxQZDlJUVZg3SoS5hOsx8WJv7wDOXljzrGhhUbwsBB4NNbifjkH97iaDBF0uGX/18v8Uf/5Aq//Y9dXv76Ho89cYHPfu03+KW///d4681/zeFEMnjL5cb3FGcvBGw/OeXKjYpERTRuTuwluJ7i9u2CRoP0LN3qBwLZaKYV+BVEq4qVpQ71kUNyaoXpdELcwfZC62Oykx6U8NRnVljW6/zKP3yezrqGZYWThTAQaEexfh6C1hKv3prQaQ/xPcPZ+31WthqOd87z2MXP8Ju/8puY5jb+cog2OVXls3NnjOeD8BRNDXUp+fRPP8Jbu3vc/d4JJwPDo4+u896PrDORJ/RDhy/+TsH+gUaoEXEroHBd1sOzdFxYunia7Qtdrr7yKm9+7QbT4ylVIzBNiRHw8Z/Y4vXbd3BFwO5Vl8AU6MYjJycMEqpUsnnJw++XFAc+d24KErfi0uMVkxzq3gpGWUlA4QlCXzCpDknSnLsvKrzTNWVmiBJwhI+gJmmDQ5uj3Ql+IHG8gKLMiFqCpobp2CC01YEIqXGcmDBpaFRFGISoBoq8YH0tJkoysqnADx2qBlQJwtOoqsNqfAmn2aMIRyTSo6xcDg8d2u02veUed27ewG3GXHiXNfMMdrrkZcju/h7xkk+jPN559lGe/9ZzHAz2We46dNYioo5gOpqg6RC5mqYQFNOSaVDhlSH95ZDdI8NKYKjLPpPpTVzXs+dVOKSpIWl5KJ0ReD0mkwme2yHPSuKWg+dIWq0uR0eHSKfGcyKkgbVNze6dCa7TZmXlDBcvn+Xu7h2uvPYSrW6PtreEVNdxI4/rOxCHqywlAyYnGa2tTZ75wE8yzAb87m//BxLTYf30Iwg8TiYvUTcpn/7JP8j6+ir//d/8H1ld3cTxJ3z8Yz/Dl774Wfb3Rwgh6HQ6jIYFYRzQNAUbG6d457sf5Tvf+Y5t9ilLy1bOxla6UQjHLiZNY53Q7Y5Pb0VTFobRoKSuLcsR+R6dngvS0O70CFtL/KFf+Et8+fO/wmd/8z9x5vQ5vNAwnhyiGpe6kbiepE6n/NQv/hR7x/t89wvP0nXaDNJjGqmIopi15RVu3rzN0lKP+x+8n2s3rpOmVqivGkk6HViHZF3PFqaQMAyoqoqqKHF9nyKt8XyHXicgTgLqsqapYTSa8J73P0NZKF5+5XkuXjrDnVvHZPmQTqfD4GSCchtcxyH0fFxjtWrSEQihiGKfCxee4fhkl8OD1xfPRB+HOGpRqCFFHeOK89z/SJc49KnrMdffvMH+3gmrmz12dk+4/8EH2Ll5e5ZVV1FV98Zjc7YCrEPT9/0ZALOGBy/wKYoC1wNHBlx++BLZ1G5UTKPwfIemcvCjkP5qzHSkyfMRuskJvIiyauj2l3nkwcvc2bnLjRs3EELQ7XZ59NHHeeH5F61u1ZRk2dSCgsCbLay2jrXT6dDUNYdHJ7TbbYqixPN9sjxFOI5t4PJcG/kjnBn4sYvrD15vURRZtq5uZnpMjZoBaGMMRa4IPJ+mmY1KfYgTnzTNUaZB6hhkyf/6f/sp/tU//wKD4xIvUjS1ptc6RVbdQcqQ1bUeg8GYos5pdbq8933vJBYVt2/fYTKOuHXzLrXOWVtfZzSdUFcSVU8xxkGKueSgREhrkCsLhes3dLsdWu2AG9cnbG32OT7aJxt5fPij72J/P+fOnVsk/ZzBUW2bVuSEgz3r3HYdRRR2mIwtyGj3M/xAsL9npxqT4YjtUxs40rfMZuBQlbWtPVU21DsIAhzHWxzPsixnZsmKIPRnIKjCmekkm9qOwH3fR9UaxxUY06A1IOy1Zx3K3uzvFK7j4TotoihgND60uZOVw+pam8lk8nuMVHOgZMP6IYwtMzeZTFBKsb
y8vBh912W1AI5Wo2dH90FgCxVGkzF1bY93r9eZmYSsBlMbRafXZ+/WHUzjEHsBXqI4zkpiN6bMC/xIopTGIJGei9bNbANtwaPn2n5uMTOe/OB4ez6GnrcPzd32cy0p8HvyN23oeUNZNiSxT7fbJcsLiqJgTufNQ+TBAvcwiAlCa26dj82lcBeMreuGBJFlLasix5mBTbvOSrzOBJolPvCRd/Hcc89x8+4hFy7exyjd4/AwZet8n3NnznPr+m2O9vZxjGWMPV8SJDVloShz+/3CmNnEA4rUUKQBQlZI42NEie/F+IEgLyqqkx9u5P3DM5TGx6gQYRzqWtnWjDs5u1ddIkdxPN4hKAI++89L0nHN/WccsumQP/rzf4YrX/kmolfhOYZbr9gFeVSU6FoSufZAO8Kl3S+pypJe18X3IQwErtOga5uH1O4YQk+h8oS7N0tUNaTbKlhb2kYUCVEvJNiQOIHm7ANTWv2Kz/2Xb7J+ycMkAYqKdDzG1RpVCgbHOddf2yeRAwLfCuErrckrDx28zee/+XfwV24yFQUnR8dMRhnT8YAkAtMIHCSeA0IaXn7+GnXmcP+7N3nivStsnjvDpTOfphX/HBXv5MzZNvVBxdLyGVSnoee6vPKNq8hkmTtHA579yjXC5iLv+ciHUcZFGp+Jpyhd+Orn9lDpEjrr4ZIReJY6D7wuk7SgVjXDvYyDtzwODwYodcLypsPyqmd1i2kFVUM9PKHrR3hG0PeWacYOrW5DK5JsrXTox20cDL2+Sxh4oDRB6CF1jBEFcQuMNgShh5DYh7uU1hlHTZ4rWx/XgBAly90WTabxWaLfjZiOBcLMdHs6oNVqoauI/RtThjdKmtJH+iNWt2LcyGF1e5OnPv5T6FMrvH0jJ89TuheOKMUhlXLoba4gRMz3Xv0uDz+ecvYihIkknWi6fsT7n9iiFzdQ9jmeNqw/0OLCuRXqvGByMiQJBSJxmZS3cGQy07c1BF4PaPB9j35vncl0ONOOTnE9aLVCtk+f4vL9j+O7S6wsnUPVLsgpWVqzurLN6fN2xDUcTuy4RUlUlTE4OOKDT/8E3d4lkm6fpjjmdNel1wq4tP0eXvj+C3z5K79DO9nE87ucudCnEYd0e8u8//0f56/87/8ON99OUdrwqU9/jHe/5538h//4q7z7XR/il37pb5MkEUVRIN2corRB3nfu3OLzn/8Ch4dHNGWNI200S+AFeI5HEIR4QQg0uI6HkIqyykFH9HodotjHUYZ2S3D+Uo9Ot8/xyYSNjQ3e/+SP8Jf/d3+V3/3d36XX77N3sMvgZEhRlDOZhB2f1KLFl37rWV762hWmw5zD0QikzZh8/9NP4fmSX/iDP8foeEoxNFy+8CjjoylCafLxYBa0fo+lnO/0dWMXtLpSeKF1JuZ5jdYG6Si0KWm323zrW1/nu9/9BlLafubJZILrCZqmZmlpieVglbaXIB1oZEEjC4ypcR2HOm947lu/w+233kAKb/Ga1DXKqzACqAs8/yp3b9/ktdde4/nnv490FJce7LK0tML25kXbRqM0p06dnuna6oVOq2kaptMp0+l0sVDZl0JKh7puiKIYXUuqUvDyc9d46+oOQWBZTAcf14kp0ox0kOM60O0mLK938BLDu9/3EE5Qc3C0z97eDhsba/iRz2g64jvf+w5RK6FQJXG7RRBEi1G1zYwsFt3Itu3IIcsy2t02nm91mOfOn7Eb1nLOZugFU6OUoixt9ZuUkjRNFwC6aZoFgzX/WXZcW6GUIAxD/KgijC349p2IybjkzOlz3LmzM4vVsQy153kYOWB740E+83OfxJhZbmDZwXfbjE4Mr74sGE/6uF6flfUtwlbCcDKlaTR1M0U1LlpXKJMhZIUXSRotqWqJ4wt660vc99CjhMklNraXmBYjRgOPv/CX/ywf++gneeXl1+h2u/S6CZ1uRNVM6PVjPN/hzJlzhEnIODvAjQrCSCCFj2k8XOOhm5x2J6auNLu7d3FcyLOK+++/H89nwarleb4AdXF8Lxc1acUz4OjaiBvXjq6dWTC1vZ5sPA5IpKNxHQh8n9CPQFsZjOf6NI1lMhHaGqmMwvVgNBpRluUiymi+WZiP18M4wRjblGRrC43NdNR6McqeRwjZEbMFYFmWcXhoHf9JEiElHB4eks8YUCEEnutzsnfER3/m5/g//c//b973qR/h/R/9Ed77kx+lTgQ/8pM/QeDHGCwzqZq5VABcaaUyRoDj2nzasswXY+y5vncOAIVw7PeYjarvhbWD5wULEFrXDZ1uTBhHTNLpQtMdhPcC1OfA1I7K77UfeZ5nx+mO1bm2222khKrIqasURE2eT+n1Orieh8FlnAlWNs9z/dYh2jH8wT/8cVZWBY8+fJ6H7r9AWUiODic4DjgO+FGF40JVpyz1O3S7IdJRdLoRYRgxGRtCv8vq2qq93l1rmpq1vqNUTeD/8MHmP3RTjhfUIG0Woa58pHYoBvB3/+vX+Myf2OK/+m/fy91vfZdv/qtDepEgOgOD44wXf/u7nLlfcN/FDZytCe94P7z89QbfcxgPEurM0A4V47JhMkhwZIPrljYsVVtdXhhpMhxIJYHnMCWl7Uvy2vDq1dvEYcap1Rh0g6vPIVrXyUqXL/32IYHfIc80SoKfd3DdCcr4iGBKFQiWzpYkSUihDJO9krh22e77yLM+NzuaG6+lRFOJLiSNUvgOFEqxvtFjMi6odE071ozTCafcFXaOArYvOLzjAw9y5fbb3L074cylNd7xzgcZXq148/YOIla46Ygoa/jSv/8cP/eZT7Jr3uS//MpXWTlzFr+dMNzdo9fyaCpN4dakx2OSVoQroWw8ykrgyYIgiFB1QeAbqqYin4S4rmZaZLz2QkC/38KhZDIY0OQ+S+cb2+/qDDg+FnScFQI1ZHg0AeGxedbHyILjQ0WRVwhcBBM8Aa4rUQ22T33WF29r3DSqNPi+h9EVVeUhhaQUU4SBt640PPruDstrGkODLhvySoF/gONOWV3rYthkZa1LUy4xrW6yefEyr77yJvnBHdqbLbJsRDHq4PY1UbfktHea11+9jhk6RChaj7aIQofrN6Ys9yMiR5CfCMrMUJoBxsCbNxoSZ4mkW6CahgvnYnbujnHzNjIukVKwtrpFoxXbpzbwvTYnxxMunL+PvZ0JRqdcung/f+yP/SK/+mv/hmef+wK95S5Hh7v4nod0bSRGbioqk9LqnOH0qYs89/y3UFqile1HPR413Lx7QlEUXLrYYWWt4pU7Divlm0yO3uLS+kVeff0uf/7P/zn8Vsx3vvcdfMdn//A673rqMsPhmNPnlnno8tP8xq8PkJ7mmQ/9GO99+r089fRX+K3P/mfiXoBQoKiQs+YSIcRikVHK9gfneW5HP8pHaTs6cxA4MuD2jQmrG21cNyZuTblw33mE43L9jRu0egHPPvd9vvA7z9Jd0cTJMoKQdt/liSee4O237nDz5k38QKK1wbgZ6Yl9gHvtgChO6EcdjscnPPnRT3Dm4RFvvPwymWrwOh5u5LJ+ahOJJAwSjoe2Ezf0A8q6oigKy
17NWkSEdNCmRggoqprhYMzqescupDX4AZw6vUFdubzw/BuEfhcpS5TSIKaopqJWFdKzOZkePlrDyUlKFIW4XRfPNOTje6acvIA8avA9F4RLpRzGhzsI0+LdzyQ8/60RN64pltanSNHn8GiX5d4aV65cpS4r4pZ1tjaNDaF2XYe6ulftOF+M5tqzpqpn+Yw1jhPRNNDrBExGU5oGpDclki0kDsIoHr78TgbjITdv7LHUvcBdN+Ptt26RdFocD0cz57C9r4eTI1zPJc9tF7Lr2nrMLMtwHDFjo2pOTk7Y3t5mNJqQpimdTodOp8Xuzl37OboBAVIGZFmxAI3zbue6rn9PrMpca1YUGY1uZot4TRRb/W5Zlpw708PzNeNBhlYBYQxlWfPdb9wi8LtojjDaMnBaOQxHR/zqr3yOPKs4feYCZXOd6djlG1//NkI4VHUGQhH5MVIElKXND6wqB9/xULrGkTFZUWFkTjArajh3tsvqykUODyGdpkzHOb5Y4d/+2n/Lj378x/gjv/An6PZ9GpVzuFfOcgcDpiNFuyVJ0x3Cdo4X+0wGijxLETLAc3wuXjzF3Z3rFEWGaiRBHGGMpqwqWq0WTz31JF/64teJw9CmZmgL0g8PDxdM5Xx86rr3xrbzYGoArQyOJ6mKmiD0cFxbu+jO2syoa6qyomkkGMeWNRQNRoPRIB2F47j3AN5sopHnOe12lyiKCMP4BxjPgE6nt4j2CsMQV9rzPQel8+BwOcunraqK4cnAfi9/bvCymw5lDMb1+Mbvfocvf/3reJMRK+tniM4vUxcFt966Q97k1KrGEa7VspYKz7egsm4qHM+z5psF2DMLJ/Zc//iD9x4zrnH++cYoytJmM1ptsL94L1JCVdvpjx94aKPQWi2Ov5RiFhVVL5hdrRW24lYQhB5h4DFNcyYTC6SDICbNCpAOQhj63imuvfEiymg+/Qd+kgfue5yT0TdZOVXx/BsvkU9DfCGAAq0EuJKy0Liez63rNq+2KjSOo3GMg+vYzF+jrblUG0MQO6A9fN9FGwX63pj+/9fHDz3yfuhjnjFOQxALilGb5aWGO68l1FlFb7nkxz/zFLeO3yIfNrz0WwMC2dD4AYUIOT4Y8PRHlwkfHLO1tMp/+HsDltfXaS9vsj++yVJcs/Fgyt1rkp3rFUYLpK9BGhrlIN2SxoXAD1HTgkYJvCAhrhuMrKk70I8ChvuK6YHLE5fOs3Eu4cXnv0cQJKikoPY7lNMG6RgaFZNW+/SXDN1OhOPkBFHIdJpz9n6XSw+3GB357O8NeeNlgUERaah1Q1VKHOnR6jic7JdkE83GVoRYKciva7bPn2J/GLJ6MeaRh84y2Rvyu19+lbOXFdunVnjzcwVXv7VLZy1Ei4rAtNhPJzzzo6dJ8oIXvrGHbCWkdY40PoHjkzcTet1VRumY7Q0PGSuuX5FEXk2uLMDfPueiioq7uzmdVo9kteDGWwUr3ZjVB1uc7B2A49I/1WJyt2H9ss/b365o9nM2L62RJAFvvnWbcw8mFHrMaGL1KXmR0umBFwRMpyWqsY0kSs8q01xLm+cFONIjigWNqsDYmyiJA5SyOxenlaNrl3bkIl3JoMo4v96iuXGKt/ducGr1NFlWQDRm+YLH4X7IaH9AM0pJNkL67fs5mJywsmEYHOW4nsf0OOXphz/AfeeX+ZXf/DXqosR1ApaWa5548N1899Xn2T59njtvl9weH9PtegROTjauOX+/j6oCJtcrdNtwcqgJIwdjGiYjgdGSpOXjyjbD0REIRRwlGG27rv/+P/gfORkc8lf/yt9geSXEkRFVNaYsNEtLHRw3IfR7fPwT7+Mf/oN/wwc/8jA3r9/AaJ+Dw2OE43Jqo0XHj3n1xk0u3NeCLOeBBz/I177zTU5tXwK5hu8OePWVK/SXYzY3tnn55ZdJWhFlDmVd0ekmeEHM3u4Jq6t9tGmsqc0xKFPgSB/0LJ7CcZGO3TGnafoDLkSF51oA0enZxWk60iyvrHDu/DZX33iVJN5CocmLExoyllfaPPPej/LFL3yHaTqmqTVxW4MOiIJl8mJMWZY47oxZdCRh0mKYZTz++OOcPXua9dOn+fZLL3P24gZ7N27w/a9/i/e++31879kXkE7AdDJiqdvhYH8Xz5sZiKR9yMdxjFG2maOsK1zpLNgSVxqSVmAf4tKhqCp049HuxCytSt6+UlAzBCSO7FHplFYUUxQV+diaX+67/xTnHzzN1rlVXvjKyyytbLN28Z4pZ/30Cv/5332Jk1vH+F7EKJV88MMPoLXkcPotPv2Jv8Sb19/gV//df8J1faIktNmFvs0SrKqGqtQoZd/LdDrF9+91J88B1zx8WSnF2mqb0XTMZGxNce984jTTyZjjowKla+pSUjcV/V6LLNNUdUoU9ghDl6QVzvRx9vyPx2PCuMV4PMaYmRtYV0hjjRFrK6sMBgOm6Zg4DlGqsZFcnTZFXs7Cs0vqpgBmDI6ZOVe1Y8enRswWdLNYeIUQeI47Y36aBUCxq64NzJZOQ1NL0mnFH//FT5GVe1y7esDVN3aR0vYn7+8NEY7G9RxUY5k0V7ogM1xPkaWKdjeinWxyd2cIoiLwJJ4b2vceBRTFFGEknheQF9MZgdEmr3KM37Cx3aJqcu67fB+T6Zgzy09w+9aEuzuvQqG4cPYyf/Gv/G1u3HqFv/Xf/yVaQYTrCYLA4/SZTQaDE8q65ujoCNUY/EQQJ4IoSpA64PiwYDrW9Ps9jo53MEaRdLozx/MUgQV7SRgwGAzs180MInOX8A9WNNr/b3BcSRjafu48L8GIhfGlzBsL2CJbDWh/BiRtSVlphicVrhOjdArCLNzgQWANUo4j7NcJ+z3zvCRJEnq9JaTrUs4C0l3XZTAYEAQBSZJwfHzM9uYWStWLv6/r2pYTJMnMFFYvphC2W1wstJlgHcjalHhOgOP6DIZHdFodHn7sCb781S+xst1heDzGwcOXLmVZ2EBzYa/pIIlnkUsa0+hFR3ddq8Xmba6bnGd0zsGlUvXCoLMYZTsSV4qFtlUpe53Pv4/vegu5h91cVQvwr5QiCCKqykbrJUlM4PloI2aRPjZto6oKNre3ODg+4PSpi2TlCVHS4wMffpJvfPO7XL/9Jpcf2mJvb8x4lOMIgZAGXRmaupplzCowjjV1iRrXt1mUZVkjTIgjghnDny9C2re3T7O5tcIrL7/B4Obw99fl/cSnImNEY3u4lzuM9xQ3XxK0PcmZUz1eeuUWj74vYfmRkNGdbd743ZcIQ6hDh+wwYeOyw9qDglvfh3IM8UrCYCTB81FG88jTBVvbki/95jFVEeDGA8rCw2/VGC1oUp/lSLN9rsdoWpIeZgzvNnTam3grhs65fXpOyLNfznn/05/i5//UX+Hv/o2/xo23X6F7LiXsezR1TH1iaHc93OUJIl+i0SMORcqpRKI8jQg8lFDoxkNQUk8C0CVSgBQeTe1QFhVCaHzhYmqXbFLgLnlIDF1fUHrb+FJyNLnG+fPbnLvwNN//1gvc95RiyT/Ll/719zi+M6azGpNnGjfTlI1g8+GQ
w1fHPPxQj1q3+f6Lt/DbPo5RdFs+k6LN9sYSg+yEYpLjlA06rHFFyLSqCIwhbLuUk5IzD/c4OXKZDg7w1zsop6RUDad6p7j9nZv4Z3zKQUBiJpRen45XcHyiqXTD9v0ReTMF18X1bT1amWqMcMhzhee6lLWNGErigKIoaWrrfo/jEGU0SlcY7EPB9RWBFlRGIoBuAp1Vgwlc1DjGGwrq8YRh4dFt+5QVmCXDqUuaYtAhr1ZZWmt47vm3cVTANC2g8OgvexwNR/yBj/00o1slz735n+l0WwxTw+bZAqf2CGKPjf4W03TId58bsbxWEIuIwZGh9jVlXbHebtMojXQU00lJEtk6TN8PGQ4sg9dqhUzGDaury/iB4c039rh83/sYjY8YTt+Y9brmOKKLdHPCoM1kXOHKFivrHkdHJ/i+S60a8kzhOS6okKI5otYObU+S1hV/46/+RX7jP3+JOzu3GBwdI2VMryPJ65LV/gVGowFrmyGj0YCi1CRxl2k6WHSmV6Vl9ZWxphOtjM1Wc+yDrakqmqbGdZ0FIxCGIVLM3JahaysOlz0u3LfE9bcGOHKNveM3iaKE0XDK449d5vUr13n80fexvr3Eb/7mb+IKSRBpXF/R75wF5XB8sk9ZKBwnw/VaFDrFFwGBaPO+H/kQk7BEacFHnvgE16+/wb//N/+Cjz/zFPk05ytf+zaubw0qhhqYLzI1jbbNHkLY6CopJVVVotR8ZGW1d0LD5tYKjU7BSAYnU7a3zvHU+x/i3/7Lz9PqS8qioW6syfXC5TO0+gGuozG1ophqbt864Kn3PM1TH3mQ73372xyflItn4vs/+KPcvvkq3/ny75CPKy4+cBGlcq68vsOTTz7D2uopvv3sd1k5dczxQcnhriTPMtrdaDE2xlj2BAx+YN2qVsdVL8KG2+02y70+OzvWwYsjiKKY0WjCudNnOHPmDC+//CIIRUOJLu2i6zoJlx+8yPPPvYDvQ+R3qbUdsfu+TzjT23m+1ZopZUeYllkMqQoLFCbTAUIYHEeS57Ypa27kaBobPB+HAUIapCsQ2MUqTXN8z3Z850U2A9KudbVXVrPXasULllxrjRfYc+75DeAwPFb88V/8Wdr9ilvX7/CFL3yfpd460qlYXl1FCMFbb72Fqt3ZIl5QFtBKuiQtfwZ8EqSbk+Y1rvAQTo7nhvbZ6xcYI2gqH20Kzpze5PCw4JGnluis1rzw/C5+2KbViTk5cOh7EQfDO/gy4UNPvZ/P/qfPEbfP8MBD76DdnvDbv/55lpY6SAc8p4fjCLLiYMZ8OTiBBTjZFOpKkU4bzp+7D+MWXHv7Jr7jE7cjqtLgeRFhJBkNrfHJ9TSBGyyA2FwqMXcNw9zRbCN+jFELQ40j7WSibjKkCXGdmHYnmY3OG6Sbc/b8MqNxzvW3hrRbPRqdonWDK9vW4e3bBiOl6gWgBWzRgBfQW1paZE62k4SDgwOSJGE0Gi00kkbpWWOOYN533TQW4EZRxMnxMXqGSYyx8qp5J3aSJFArpjpnNe5wN51ghGS7tcaZyw/wxosvcubhPq+/+iY+Hh4+aqaJDKOEsq7w45lON8txpcRYaeTCXW5mmZtzYLuI+1noLK3rP01TO20JA+q6Qhhj2UjXQxizuHc9x50ZCYsZU2m1p3MdsTXkWEY5TbOFrEGK2YZrFgF14eJF7u7uMElL1tbWWN5YoVC77O6dEEUBw3E+02MbwshF6sB2r2sH3CkYlzBI0HWDMjVGuxhcGp1iNASeHYWndcPW+lmQOZNRhXTs82JwbfT7Cyjf9VOuaYzG1RHFtOLayw5tfESj2X5ok2JyTOwU1MagonWUmXDw5gnduMuwHNNOOrz35xKef26HN7/o8fjjD7GnDjgaa2RTUR1pVs/6XHxHTFl6HA1vUjU1aS5xI03sO2Sv+IRxwJlHOlx7fg8HSS4kjz/6JO/9Sfjy57/H9Rd9svEJReaytLKGn6TUTOmtOAi3ZuduwJ/6cz/Pnf3Xef2V2wR+hyLPmKQneJFLXYyJag82GxwXfBfqzKAEBA5IFVJMCox2qBuB51tdisTBlYppZY/XaucUT37iMm/dfpGo7nFy0GaSX8fLfZ752JP8+r/6Eu5xSqZgogPawpC7IFXFn/5jn+bXf+MFrt3c5fJZn9VOyot3fdrtDe6/7xw3j25wcucAUzlkIifSIX7boS5rXM9mh1V5Q7cfEm/V7BcKtxQELZBNSHMAk6MpTigsdagFrU6CoyZkU0GyFKOXSjwFE6FRjcb3JPlIkXQMo1FMo20eqHANlTHIxu7afN9FyHlcQw3SamxcFRDKnHHosh3WhMsR0hEUY5dmxyPqVBzsSgih7Ubo0SH9iwl6bQmnPIfXKnj1uSt0usvQ1GSjCaPcgPFo1SXvfXyZuNXnu683qOEurVM5b77mcn5bsHa5pBootG4hnRLTLLF3cMJkVKGbkEZPufTIOdI05+TA5sxJKSm0IM8rPAVh5FmDgRFIIWZjnvaiMk5KA8LqLu095aAVXLx4kYPDPYLIY3g0YLm3zDQv0Ch6y+vcvvk2vSWDxtAULpfOfoDXXn8FzZj3vucj7B8eEMaK1658n1a8SplXaFUznZTEUYc0H9NfaaNmmqfRdLIQmlv24gdu9tmOWymNmWkP19bWODk+Ym2zz+7uLr1em62tLUajEceDKVHYZmVtjZOjY4Squb27z4/85E/w/PPfox+1uXb1Fn7iUTcFvucQhj55VtuQ/MZWqwlTEYd2RJSXOadOX6K/tsK1W28StNa4/OBjmDznwYeW2T+4zbU3dzg52md4qKiaEZ4bUDcGhHWFem5IFLUtIyNrhGwoUjuOAgO64dKlS1y9ehXX9en3+xgxpWkqppOa6dgabRptEMIhTCTZyKPWI6JOi43TW1y4fxUPj6/+5+9QT3M++jM/xmM//m6GL7+5OJ7f/uJVVs+c58VvfpbWSsiZ0+f54I88yZe+9qtsrV/mznXDc889hxCGs2cucf7CGb733LMc7Y8JAo+lXsLJyZAz25fIsoL9o9t4QTxzf1YzBii0WYezAPiyypASWknfNoSsbHOwf5dWF9AxeTWyRp5a4MiY8+fP06icK1eu0Ot08QPJ8dGAVqtlhf6uxvc924jkurSSzgwsGobDIcv9JTvWLFIA0mm10K/WdYUyerHwLi0tLVywc2nFvEkkDMMFA5SmY/rL60jXoaoG6LohCtqMBic4XgttajABvt/Q1ILLly9z7lLCKy+9zbVrB4scxKWlJVzXZW9vj3m1XVYUhL6/GKXPNalCCGtqcrkX5q0bjIEoClCNg+869FZW2T+6w4c//gFu7VynaDLaXcGNq0MwHpHwOTkesbWxRjZSCK3oryxzMtln82zI268fk04M/f4yaTbE9+UiF7KuQEsFChzpW7lSYzMbj44OqFSFJ6320D5XCnrLvYUDuCiy2QjV1vsFrnVr9/t9yrJkOBziuMw0kFYb6HpzU4i0WYONIGlFXLi/Dc6Yt66MMI3D5voFGj1kd+94UY1pjCHP5q09ZsbOgVJmAXxgfv6tTGYyHbG
xsYHnOYvzMm+LCUOr9yyLGt/zFqzkZDJajHwDP+JkcLTQF7quS5bZ+Kk4jskK2yPe6/VQjZ5JeZxZFFWAlDDJJtRNwWOPPcLdu7cZHg9nWbqGRtnfv9VqWUCubGC8F1gpgZg9R+fAfG7ScVxhga641+89B8VW+9vMpgmzNUCz+J0nE/tMDkIfVTd0Op2FhlRKZ6HhVEotgOf8+Da1JowTHnjgAY4OT+j1XV568RqnLmzixYrDoyOCKLZVk4OM6SQnCH2CwPa/e75dh7TWlGVNlPiLe8HzIorMsqnG1DYOUCpqIrphmzAJKaYHHGVTzL75/TXlTD2D63k4CIZ3PVpuSYOhcEsme0c4jsPeUU5tYgbHnBcJMwABAABJREFUB5w/t0Z/rUfWaDw3Zu/2iLtXFVu9SyBqdvdukQQxSeDjuS7JpmCaerz2gktVBLSCHio3rLQ8tvs9xIkk0C75OGcwGLByRtBdVyxvptw+eImv/Poekz2FqcZsbsT02g2tYEzsTyiPmTnKApq64Lc/+zVe/v4uabHH5mmX9c0lWtKlHUSsbPgsXdJEgYRKkg1teKzvOwyOI/IUGiUIwjbSaVB1SOBHGF+hfMnqWosHL6zhmkO+89tvEsgzxBcmnD93g47fxqlbvPHqNTYu3UforXC67dHuS1RU47oVRS557e2c9ilFb7Om8SF3Vylzj2c+8gG0hk5nivELpCdxETSipCwawjDG99pMJzlJKyZLK9Jpg+NpKlMT+REqs07SpTUPzwtwRUw2VZRZg3BXiLdD1NKEoqmZZjWR1ESeJjCKGkNWJAhZ4CcG4WpkLfBLd5H5VRQl0NhgY1/iOgIpNMptuNMYROOyOwxRjUvS1ySnNceHBdfedNm5mTLda4jJeeABwfWXU6Z3hmydjum1L/HOh99POWpw3SXOXb7I0kpAnk8p/DE6foivfSfFCY74b37qj/Px0Taf+UNPoyuH/ds+edpibcswHRru3DjAFYCuKOsxWglM45GONNJx6K12kCagPswQ0xzX1ZSFZjAYUNUpSI3r+PS6K3iei+spkCWe55LnBa1WG9932Npe5dbtGwxOJhwfHnP61EXW1pcJPZ/JqOJw7wDfjanKhqa27MKNu99g+5zAaJfLD95Pp695/bW30GXC+558hnc+/ih/8A99hife8wjnL5/lZ//IL9Be3WJ1dfX3OIatEFwuxOD3woLrxcJujGE0GpEXJXlWsrqyznSak6YFw0GKUoaiKLl14zZ5ZhgMRjz4jktk2T4//qlnOD7ZIU58fDcg8F2W+is24wxBGPq2WUgbBD6TYkgjc8IwYjQ45tabbxIpiT7epdq5S7ed8crzr/H1L7zK0d4dDCcYY/D9HtITCGEz7qIootuPMdh6xqZSpCPb8jAXzrt+yO7uPsvLq0gpmUwmCCGI4xZJktBf6liwLTV1XbLUXeMdT2zhex7NJGNyZ8LNFyY8ePE+fuoPvZveaZ9nX3iFb/yHLxCF7cXr07/4MT76zHv4qZ/6Q4xViu/EfPW/HPDK92q++sUrfPvrz7K9vc25c3azcvP6Hr7TmoU7a/Z3LatQO0ccDY9Rxi5C83MIzDYGc5DmzP4sGI0mIEvG6S3CWNtga/deQLTjCKqq4M6dW9y9e5ckCm2moRa02xbczwFfmmZcuHCBVqvF0fHBIlRaSsnJydBuFL0QjKTTaeP7s7YgYZkvKRzCICLP85mhQSxY0HkW5XyMX1carQUePuU4RzSCwEsYj3Jcv43SBZ4vKIoptcpptT2uvnGD5559i1s3DvE8B993Ac1wOOTg4GDxnpumQcJCB5dl2WIcPNcQay1RjaSpBbrxWeptgnFxPU3V1Fy/cY12p8WNa/u0oiWmoz3qVNIK1xF1G5o+73/mAzzx5NPc3dtHxposLxkcTbny/AmTocBQMxgc4so2UgREUULTgJAaXWt81yYrSASXH7iPw/0Dut0uqrpnzhqPxxR5zvHxMaPR6AeyGZfodrvcq8czjMdjRqOR1bwpM2MOrSaxKGq2tk5xavs0ZVnhRyV16bJ7u2D9dI4jWjz6yLtA5hzMNjpJEiHErFt7dh26rmXX5qzoPKx8rg8UwkqdWq3WLC5nsIj/abfbRFFCEAS0ks6iPScIPFZWlmi1WhbUudbssry8DLD4Ob1ebyH9mDvCi6JYGL2CwJsdsyEHB3v4jkuvu8RwOGYySWebnAZlNI4LcRLMtKXMGNKKMs+IosD6BFSNkAptGoQ0IDRlXc6ylhVNPcuUbCxIE8LmsXquT+AHVnM6kyLMNxNCCFSjEUIyGAwB6zyfa5bFjKRwXWcRTwQspjK7u7u4rsvNaycsLa0yGec0tW0niwIfrUoalSJdW5SgMdSqoShrlNFkRY50bf6p1hohoaymCKehrvMZGHcxmcHXMK2OGQ4HFAZaYeeHhYk/PEN534+5RlUKiUPL+Nz+fkgjapRTs9RxCCOHWivSvMHUgt6yxPV67O2e4EtFMdZsXFylqhV7NwY4wkF5ku7GKnEcMUoFeXGCEB6ba20ccjbPSPZ3UpTqs/fmEa14xIWHfW4dNWhjWF0SmLomTQOMr1hZSxgfwWQ0xfc0rhPhBjAdSXrLtoat0qCNgxc6tJYqXN82t9QTKMYtkIrOqQpHKPBAanCkQ7hkuP6KoRUJfF+jCgfpKYZHEMc+rlsjXUOVxzhxxZ/8Sxf5zpf3efnbBYNjB39Dcv9yzO3bEacurHCxK/jWb7yOMYpJ2XCoNG1i6ibD6StaHRje9kiiFo0uWF7a4oHHnuCtl7/HtLhN0E64/nLO5raPnwh2b+cEvr2Bp9mUIDDkqaFzGupWjao9oqbN+LbEETm9nkOjCtK0QpVrCC+lvVXgJIppDe0Qllpd9u+OCDuQFYJBFoNICbVDWii8AJJEUGWGxtjRhXTAdW3JVhDaXlTPDcjKglhF5H6OHwgC4XD20hpukfHsfymQUhMZl8E456kPORwc9Ng8v0KoJMrzCM8+gJrWvH3lBUZpxoX77ufu7bdQZUZaKsa7gh/96BO0g5KXfvXbfPzcOW7dd47PfeEbbJ1PSPOCJAlZWTcU+YS7112QCrARFv6Sj2msGN+PQg7u7PDkQ09zfDTmhavfoxUkNqsvdNEajBIsLy8znhwjHXvzq8bg+3Yk1el0uH37NqoRuG6AHwiW+0vcf/4xvvu9r/FjP/4pXnr5Da5cfZY4bFPUBarx8TyHpOWyunyBw4MhZbNPGLTpLW9x6tQ2Rms++WM/zpU3XueXf/mfcurMNp7nsb9zmzTPcV0XpfTC9cpsgZ07DcuyWux+HcdBmllAb2CZkTzNSPPM6uOKiqJu6Pf7pNmQVhTi+glnLlzk+GSPN1+/SreV0O6HLPdPMRgMODkZUJaW1RDGkKU5ruvzyac/iJ8IvvzNZymaAj+JqPKa+y9dIgmWmWYpk3QPPzBcvXqTbr/N6ERgZIkX2I5p3w8Ig5jJdITExl1UpUJKO7KNomgxcptnVOrGBqJvbfdtEHNjSLMSITzb/bvUJU812p0tkpWht7zE1uk1Lr3jHEkSAZ
LJcJduJ+b41j1X7cd//Mf5zGd+nq/8l1/nb/3tP0FWRey9nfMP/qf/gX/+z/4ld2/lnDt7kZPJXa6+/ibg4bg+rS6kWQra4dEnTuPHFa+/fGwBcWWjgsQsFNpo+77nJowoAYGL0R5GW7Yy9DrESUjguUzSjKLKaWqNVpa5qeoc1xH4XowxalZxaF23URJydHzA5cuXqcqGnd07hEE8Mz2B6/qL41lVla3gK20WZZ6Xi3iVMApmES2WrVJKLQK156DDGIORBl1XCCNxHYdW28PzQybTkkmW4uIhHYPnxly4b4mTkxPuXE8xNASBQxgHMzNZOXMQz6612fU9H8POjUzz/1ZVNasptO1UQoARPmVhwUO37xNGgjJ3GQ7HNI2HGwoefWKTG29MkERcOHuZ2zuv00o2ufzYec7dr/mX//izTI5rRNOl3dGcvtjntVdu4YgQpS3LZa87geskNoYssLV2ZWENSjaCqaCoSrIsx/PcxabCC4MZGDazasXOwjXv+z6+4840sOqe7nDWouO6knbHArwiL2kaTZwEGGab36yh113GcSUnxyM8X9KQ47k+dW2o8hrXseH3jaqR0hYeGPODjTDMfpYPs47seYNV01g2bn5f2uilGtezm7wkinFdC4p7vR7D4dACME8uANny8jJZli6c4rWy9/nCNS6cmY4zX7jejTE4rmQ0GmGMIY5tgPk8TsnzAmjsNZq0YisXmBUmaKWo6+oeQJqBQmVm/ee1+T1RQ/PPmesq5/fKD8YN+b5PVVnG2MyeU8AClFv2NpwZnLLFz5Sz1ipHejiePzMvgR8LGq1oNASRZeOzLKNRFU3l2KBypXA8m3ox12EKIQgSG0eWTnNQhjC02m2MdYWLKsCLFY2jUSea3qkudaEY3Z38/jKUrcKh2/WIlxThZs6FDxnqYEpTCLSw7Q1KF7hODVowPjHs3D1ASIWRCjfS7N48ZP/WEZ7ngBQ0ueLkzgFSwdp6n/XNHlGi2bkz4mSo+NCHP4NiE+VqLr3rFJ1LMbtpjggkXj9CEQCCqikRWrBzY0qtRqxuOFRFDyNrXF/hx1OyvAST4HmSpKsQAlQVgXEJWtDalHjtKY6rSWtF4iboEiAhbXzu3jK4XsB4rHFcl8YokNBfdpCOosGgGkkrKqCGf/J3r3PzisNTT65z+kwPNQq4fvuY5a2SazdeZeRv88GfeQ+hbJhKl8CX1NMJulaYos3xtQRfJNRqxHCQ88ST7+bK6y9xdLyLND6bSxf4iU9/mKoQmAbbCFIKsnyCI2MEEikiyrymHSzhBxGmAlUO0Y3i8MAK7NdWQ5JWTe/+Fby1SyRxl/ayix9IpvWEJhEUTgCewTUFnpQYx+Mv/oU/xbnzWyjXJVqFppYYA1UJdelitENVKeIwwndafOSJj3C538fLPMjs5uOVrx/xypdtZaPXrpCyoLPmc/d2yOhgiEoLskJyq845Guzw9u5V3MhlYyPipRdfh8bj6fc9TJF69DZjRvslulzj0T/60/z76Q7Xb1/lwfc+wDAbonXD3u2CO9cMUbDE1vYK/c4qjjRIWSFTQZ9VvNSwHMQkUYdc1WT1AFeAoZplhgUI4eCHkml6hNY1GIkU3uxhosiyjOOjCVp5GGOdfFEUMBxlfP5Ln2P79DmuvPEmh4e7JHEfz0vAeCRJwGg05kc+8TF+8b/6WXr9hOODHN9rc+nB+3ECwXef/R5/4c//Ff7tv/l3BKHHnRvXOTzYhZkeaT7mtk0WzuLhBizYhvlIpa5r8qpEuPbBNTg+oSwroiBiOsntAhSHgKauNKNJTr+/zK3rt7j6+pt0Oz1UA0nQYW/vkN29uzhuQ68TI4yx45cgQJuKu0Jz5XBCYSpiP0amFRe2tui2VvnK177Kzs4untNnf2/IUn8JTEQUu8RJiJQuCE1R5AyGRwSzcVjTNLie7Vi37LAV7juON9N6acpasbG1wUc/8qPoBoJQEAS2gz4IPOrCaqlCNGudNXzPoyzGvPbCa9y8sk8sVkj3DXHY5mg8pLvpLF63r7/CV599gS9+/4tcOOMzmlT85E/8LB/9wC9yavNRVtdb3Lj1Om+9eYPtM5s8+b53cP8D5xmNUhzho4zC9/rcvqapSpvXN18Y5+YVuyGwYNf1JOgIpQSaFC8StJJlPLfFZFxwsG/ZRCvsb6ibAm0UcRQsWLr5uZ87g+cs3uBkNKt+1ORFOpOv2M2W5/qoRlOVNVlR4vjeLKjcodNt43qCpOURxd4iDmU+6pw7fufZfRiPWkmKUpF02qxtbhO12xR1RV0ZfD8mz+1I8+TYlhBEic/yyjLSM7ienGX/5QvT0vxa/kGTylwDV5U1Ta3wXB+BJI59fC+k1V5l61SXBx8PaC/B8FiRZ4o8HdNudfBcQS9pc3CzYTpsePq970TVI/buHDItrjManPDs147QTUy3vYxiwniSMZmMcF1BlEiCELQSlIUkiTtIqWm3OwRBQBxbDWPdlGRFTlGVs/F1j16vt8iNNEYRBPZ6DwKri8uyDEfaRiU/ChcVl/PrpdWyQK2qKsbjsd1cRQFrayt84hPP8KnPLLF5KkYry3Svn9I8/aOGpilm+ae21QgpZ7E7FiRl2T0d4A8CprnMYR6BNQf087HydDpd5E7OP9/3XYoiYzQeLH5PGw0kKTJ7bvv9Pvv7e4umGa01/X5/cW1JKVG6oapLXM/mhpZNBcIwHk1IomRWpaiRwgUpcJ3Q6hOljTaqSltBaTdOzu+ZDszrQY2AJGnTNPZ9B0GwmPJEUUQQBACLTM0fvAcs4LRf4/s+nhuQZyVNrRHc+/uyLKjrasZ8zgFrs4gzcl07Wk+6LhfuW0H4JWk+5eRkyOHhgMmoRM3ApJmZ4dACoyAOo1k8kZhl+uacPrNJd9lOehDYDN6ghXADzpzd4PzpU/zojz7D8a2CpPPDkY7w/wdD+dgnpRG+QLoCJRWtpYDhmz3e/tYR62tdZGCQMsNoxXTaYLRLHEbkZYUyDa700I0hL0qkK6lrjTQODS5rW9v4qyFG+7aiaixJszucO7+FxmH9rM/B4S0OTw7QqY8jDdN6TD92WY077BydIBqXnVuK+y4LNk+HVIVLUzmk2RSjJZWIKOshdelSayvE18YjjF02z2p65wK8YgyVh+7C4fUa44cU45qjQ0VTB6yuOjRFTpKAI0OEbMDUoCRp6mGo6IQWYCoC3n7OkA0cfvSPfZDKHLFz6zZ+z2NJtXj9m3usnF/CqJLdWzt0llbR44ppLlHOBB8X30uotaLf6fChT/wov/6r/4yNUx1uvD2gFa0SdSRHh0M21vrcvLPHuVNbDMb7COkjhEtVlKyf96mcGK9dMHgrxykEeaGJOy2mwzFJ0kaLjPaZDfJ6A7n7GqbXUI8NW+cD0mBCpNroVOM5FSNHk/uKB9ae5uTkhP3J6/S6sHNN0mhbQ4k0CFETBA5GgTQOP/2JT7D7UskL16/S1IeM8wndfp9qklNnEunUfPzRiLGOGZxohD6mdFy0u8Hztw7Y3jrDxUfWuPPmDkd3dym05tTZUwReyvGJxEUzfnuXB
x96kp//Ax/nr/6f/w7vvPwQO7sj6moPxxNMhiVKKzY2WnS6EXmqaXUcxpMSo/qEfoBSKUpXTGvNqKzQpmS922J4PKXMagQSP/AIQoGZLWhK2VyNe80gEq0EruvPWMGUadrg+w5GNgR+m+GxFWCvri5TqSkYq9tptxMee+wxHn/0af7xP/17fPIn38MLz13l5KBimtvxnudF4NZUTYlqXKQTsdRpLcZAcxDZ7XbRWnNwcLAAm/Zh+XuDem0vu/0a3wtmUTGWzTx9ZhvX99nbO8TxbQyHMQZ0RaMlcWCbUaTENkfpChB4bsTK8jo3rl+zRi0tKIqGVivkZDTm8uXLxK0IR0Ycn+xydHBIkljJxt2da7aTW0mCILYmACVwvIYodNC6IUtLHCdAaBt9Ipx7OZUgZ41GFuALIVCVptX22NxqkWcVZe4jpMM0HVBXsL6xQlVntsddJBR1ao9hfxU/CHjsPe+kMtd54v3vXjwTj28NySuXUuwzuv0yV14M+eDTH+Hf/Kv/gOsXeGGKI2OM9smKgu3t02xcgJvXj9jbGeO7gg9/8JO8+NyrHJ68TacTUxbYKBExA5Xcc677gUueWYZleTXiYP+QJFqjlTik6TGDk5KVlS6gyMsC17FtKkZbx2pTg+vB2uoGJycnFKU9z9IVFHlFq9XB9aDI7c/QGjvia2yD1nxBr6pqZqwpabcjtKktC2caJAll+XvzNecRM5YhlEzSKf31JRpdsrTcI/A8bt+8SRK2qfKGtfU2ZTFjcExFOrHfQ0gbUea6Lum0QAi5CE6fA/DFaH0W1G2MsSHd8w2I08I4h0ymGkd0aPUKotglHTu2sUiUYDwqVWGMwDUJXphz36VzvHHlCsoI1tb73LmZkkQrnLvQ5sqVO6T5lJWNPtVUo3RmI1tMSKsdolRJPOvczotqwRLP77/xeDpjU+36N79Pm6bCDfzFhKGqKjw3Whg+oihCm4amrACrw5tOp/iBNzMSthZNM9Z0pdjePE3SzXjuO7u0OpZ9j9sVynhkaU0QeBRFRTFt2NzcwPMCjo4O7Mh7xnzVla0ynDNm9nhbZnQO9ObGoXlXN9gkgyiK2N/fZ319leOjAzY3NxmPR4wGQ5aXlxeu6zzPycuCKLJSCiPutfGEYThjyPMFILOAN1swo4508T0Px3XJSvv1QgjrGjf2d478iDyzhhu0QDgSOYsRshsSy1pmRY7RLK7p/2X/uOu6FKV1tqta3QNXs+uxqfWCwbRyFGdxD0VRhB+4KDVjQJtqdgzdmc7RjsMd6SEcydkL53GCgrev30TrgCD0rUs8L6nzAmYJHkLbNjsjrUZXuoK6rmjN2q7SNCcMIlzX1ltXZcbKyhLpcU3tjQjdLf763/pz/N//L/+IvdEe5c7vc7B599SjVFWbUmnCjsO1b8PuG/u0uw6TYQpkoB2aytLg0rHaA2EkniNRWtPoeiZYtWyWFgrXKcmnxyDaTNKcSpd4iSFqhdy6tcPe3lt8+XPfZufaCZEbIvUYlZZEchlQ3N0boVREVhi2Lxm6Gx6jqUK5Y/Abwp7B7whqPaTTjWh1Y3r9ANf1aKqafFLx1ksV0xs129vLrF5qWDtrWD7rICjZ2PLY3gJhGjzP8MR7zlJUBpwGLWr8MEB4mjjQ9HoC7TSMSwcRwv1PVmAy7r74FhtL5/D9kKPDETcGR7Tvy/D6+zz+gQBHBoxPCkzc4CaCVhzM+oux3cuy5nvPPkvoQVkVCBkxGAy4dWeIH7l4bsjycpdG1XaXIkuqEqJEQC24+/YBIjcEbhekpd/LYoITaAbDMWUh2b+2x91rrzA+SZnuOxwdaY52C3yTkB5OcHLD4LrErQO6LXjtpedp+yGbK0vQRCA1ujEURYURBW6gZou6Q1Xn7Awy2usbLAcpnoRlBeq4pBdDq9vwrvsEk5HisOxwm5JkawljIqRj6CUd7l65xu0rb+NSU6SaVhji+Ar8VZZWY971/j7P/PT9XHn9Zb76H/8h73u4TVndwHeus74EgRfhh4azZ7c4c2GVqq7JC8X7P/gA3WSNdihpd136nWXeed87eXBznYsrHTbay1RTC260sBoc33dxZIDrRJSldU17vpmJuDWedOi2Q7RJCSM7Dlzu9+kvx0RhQpbmtHsecSLIy8w6OF0bXXL27Hm+/o2v8Y/+0T+yDmAVceeGFZivri1T1RXSaWh1HFpJhCtCfOFZgDCr+JqzNnaR8qiaBsS9B6AxesZiusRhMMtytFVgURzatg0JcRJyMhwyHJ3Q6kgu3Xcax5eYWuEIlzgMaUxJFBn8YMZWNJK6MrMddYrnOaRpzmQ8xnMVdVXx6EOX0FXGs9/8Hg8/eIF3P/EEZ8/dzxNP98nrQ4IoxPU9hJDUjT0+dV1SFPmscxdarR6qUpRlzhPvepg4ThYLixXL2+Mwb+WI4xZ1rUizkjCysTRzYbrnS8bjKccnGTWKVJ+wtNGhMSVG5jhehfAUrmd48cW3Fq9rbx2AHrC+vsq1axnjwTV+4zf+NctrJUm7wnctC9Rf9ljb8lE6Z7m/RToW+L6HH7l8+SufY+t0yOkza2Rj67KeGxIWmtAZW1WWJZ4PdW0zOB999FGWVgJWtkLWNlZZX9/A911c31tU84HEmW0mFjFEzTw3Ty4yJsMwpmk0qjG0WlbnNhyO7O8wW0rsSM6aP4TR+L7EcYUNonaYgSDrFr/HRPkLRqeqKqra6rs7vYSzl84iPIfxdMRjj14miS3zJLDaS8vuGaQjaFSJ68GcALEVonLB6PzgWN72RQeLRV8btRiZTtID0okmDmI63ZAyTTi86+GIGOEUZKmaSUFcQi8kiDTdvub6javEiaDbDagqRTvpsLWxDU0LKTNW1iPe89SHWFnu4hDguQlxHFKWKdIRdiJX2zq+PC8Zj6ekRclwcs80ZJlXC5Tqurbd1bOGmbKsZ+fynlGkqgvKLF+8f7DVf1mWLZ4FcXzPRd/ptDg4OOCl7x8TJrPMRVEzHtWUhUaYkPEoQ5U2xshKSfSCEWxm0TrAIj5nXtc4D1Sfv+Y/+wfNWHNtqCMk+7t7ti++qSnyFD+QGFWidE2aTWhUZXMkZ6N1z/MW7L1dV8SiK3symSwYThtf1MNgdY9mBrR911tE/yhVImRDmo0worRRWR4EoYvAIc9mRh3Pp6obtDI2Aot7bWLzZ8ucnRXGTj3mx2NebzsHsfMczjlrWde1PTbS/B4gPN98zZ9fRVHN7s8QtKEVL3P7xgBhEpQyVKU9FtooHN8+3z1pn/HGGALPo0gzoiDEczzSsaIuDI6w8WKWAQ/wvYjxKKe/towbJERxwV/7P/wSk+E+UaJ/WJj4wwPK0xsfYG3zEZzQJULg5woy7ELiGMq8RCFABjZUVAeMpinC0Qhdg6kRs4eOURIaD4EgSUJoCo5v3yHQAtdIBAWB18KomHaywYULp2iHMWWWsr6xjRs14Iw5PjHUKI4OSlotl064zpXvC5ppgM4SkBM8tyEIGnpdadF/MyWKfNotWFuFxJWYwlC85nB0d4poBSx3eyx3AmLlkE0KWqsxrZZk
dFRw8/oN4ti34bcuKF2iFaigpsgtgG4lLnUhcFsRy+ci7r59nf/5//br6Okq952+hKSBoOb2JOXZa0dsvbONLjPyoqGpc/LU1ubVpkDKKUWpuHHtDRwZououSuecvbzNo0/dTz5p2Nnfw3MD9vdO8NwESYTnN+hGc7A7oRV0cCYtsnHO0uoS3W6bJOwShAFhIkmSkNW4w7LnEq9ErPRqlrqaauRQ3HWpDl1r7ulucv78uxBZQH9N8sZruxzfaiOkw9pymzB0CQJv1r1rNW6WUbBu4jEl5dGUVDv4Pej5HqWJaPlLfP/2EjfVEi/efpOB0rx24vH6QLFbSGrZELUEo/0hYSBwQmhKxfR4xPHeIVXWcOPGkNv1kGRNknpLNG5NUxS40Qobl1oIOcaRIaPpEdfeOqYoXGoz5LvfuoM2Cr8VIxqX5XabpspwpU+347O06pKpBsMsiFZAUVSkaU6el5SFQkoPx4kZDDOMdhlPc3BcfD8kLwvKqiIMC1RljSCu0yaKDPddTkB5tJI2WTakacbcuHED1WiCqMFxDb/7ha/gBhlX3n6NrKjodDaZTKCpPHTp4IuAVuQuRk3WcWg1TEdHxwxGQ3q9Hr7vzyrX7EjGEYJ6tlhJKcGR1FoxHJ4w7wD2PAeENXxUpeHOzj6TSUYQ+ISBQ9PYTtyqamY77oYkaRN4IceHQ25cv4PjeDzwwGVWl1f4M3/mz/FX/tp/zY/95I/x4z/1KdCCJx59F8e39nntte9xtD+mzJtF1Z0xGq3AcQTtbkgrbnF4MCabavJJRVXaaruNtXU6nR6nT59eMK5VVduUgdmHEArVwGjQgHDxQm0rXYU1sNTTlHbo0437BCLg4csXUGVF7IU89OgKh7u7FIXP3tu3F69WJLn61j5f/k+v0Ew0fmwjsuaxH0pVNConzxoefvApLl28zPPfvcpocEzgN/h+QX9VUXEAzoTuckw6sRo71/FpavsgT9OM6TRF4KBKu4gPBgPe8cj99NcNTzz5MBvr72AwbGapCjNmpNFoZcjzkqpsOHPmDBsbG6RpSpZPF3mG84zLOWC0juKKKIxmpodgpkGUqKokDq1mTema6dSO3TAeTS0WdXoWxMeL620+8pZIsklBOszxtE+TQj3VNAVUuQUqw4HV3ebFdHFdh6GP40g2NzfxPRsY7vvhwgkMFmzGYYAwGt1YMJskCUmSICTkRcb9ly6ysb5NUWQIUsKwJAhqpuMTVGHfr1bOQsKS5WNct0WYOMRtH2UKolZOa8llkO1ya+81wqDFufMxt6+9wK2btywgdzKkNyYrK4rMdqM32rY2ua5rQ76FS+RHGCMwSiOMXvxbFNkubHturOZ5nl+Y5VOEUZjmnuFj3o1eFBW+Z/MmT44HDE6GONIljm3MVqMb+ssdBC5K2ZSMKOhgGoMwisgPF+ccNKBptVr2fEbRIurnB7NSLaBVi3M9D8GfAz8pJcPh0MouMqvzXlpaIs9Kbt+8hZQQeJK8GIOu8KTdEHquNV812pIT3U7vXv7j7GN+/UZRZI0tqR2X93odHN+hUhVVXVDVBUZpqlwjpcvq6irtdkiSxItrZ/7fuUQI7smE5gDWdX2UsukQjhPYP9cao+00QQhwnPkxsvKjVqtFFFvDUZZPqZtyoZmc6z4tUNczpjXHGHvcWi17D3W7XYIgIM2GdDpLFsR7CuEqosTHcexoXjiSSjU4voN07HpQK1s/W1YNUiiaSlEXgiRYoi4E6aTA8x38QKBcxXiSIkWXj37gEZ555jHywQ8fbP5DA8rrb1xldeUyXfUIo6s+nX4PYmyIp3Qohj5KSJSbI7F1icaFQpV27CkEYdDCoPCdBoxBa0nTGIrUkN495ujmHrtv7yIU5EWK4zfkaUhtcqIeNCrk7Tt3qIRES0FvuYMG1lYNTRGyf1fQjfoc73oIkdOOA/pLMVJEOMLF1CFJK2RwlJOPQ5b6Id1uw9ntmElTMtpTvP2Fmptf1FRaUkeGoOUTalhd03juTICLwQ8A6SIch7pKcCU4EdS+pBYFrqhAlbTXSmrt0vENN196g432JVZOPUjU3uL8xiZuucmd5yWRCXCVh6MUwjgoND4xiReQV1M2TtXsHxYI3SOQq0R9j7AjqauS9qrEi1KiGHCGuL5i+0KNHygeeHCZfk8zPNwlSiRvX9snzRom0xMc0cJx+3i+QXkuTtgwEIIiD9GNR1Y1PPyOR+mub+E6gnNPdcmdLuNpROWvUftweCcl3dsgm2g8R6BNhSCkzAWeZ/U2eSq5eyOlKgWvDFKKymVYbLJnIiY6YOdwgJIFJvJ57PI7YCo5uVugx5qTndtQaLQb48iQvf0JYadDEncZ7aUM7x4y2h2ijkLefmnAyqmYZ793g6XuJTpRyO27R1x9ecp05FqTiqwompTRuCaJl9i5VVKXHtlEs9bStJsD8uNDsnHF8Fizf2eE0hmez0wrGYCxzlulZ126ORwfV9S1RGno9dcYDAqyTDAa2XFLOsrQKqcua4osR4gSYRqKPGU0HOKImN6SpL/asLbR4fDwiGee/jB/4hf/FMPjKVU6JAl8Atf2+E7SKXgeucrZG+7ief6ihWQuGBfCZqYJnMUoCXFPR7lYiFUzc1T7uIG/GM/UWiFwwEjKHFTjEIURvutT5RVC2F5coyAKO4vgXq01Ah+tPJaWltnYWOUnfuHTHA6ndJIW/8+/80/5e7/0T9g+dZbrt48oPIVQmqsvTmnKhrqsmAwUCnt8pWPAOGRpSRx1Z7rMml6/TRy2+OxvfIH9nV2uv3UdlKYsS+LEJwi9BcBpqAnDmLqSTCcZYSyRjh0t+b5PGcK40Iwnx+gy4/lvvc6HPvJhdCun8Husra1RjhuqgVm8bt68SVmWZIMcXUESLyGFZca63R7g4LsBo9GIV199lTAMGRxN6PUC6tJFmJCl3mnevjLl/vuewHFj5MzJPddyNY1agIm5lMHzPKYDw2svHXD5wXdxsDflzs4ulx5q0et12NzcXIzb5uymdG3UTLud0O/3Z4unshmGefED39sGVbe7PXAkkyydGZ00VdksGNNer2WZH98nCltMJwVGW9ZwzojNF/6maRbu9SCUrK8tkQ0HHN25g1Mp2mGL44NDO4okm8WnTPGCZgYsYspCEgV9frDKbw5e5mNEy+gUM71dQxj6ZFn2A3l/DqPxMXl+guu6TEY1UtjUCykAU9E0BaoRCGFBETrgzddP2Njq4YWC6dCjyHs89MQl3vFel3FWEEQ+5cRl/+aAVrRGkeWYxmNwbPWPSdcCqH5/lbIsWV1dXeQeVkVJ4PnEUUQUhkRRwurqvXQCpQxxHC/enycdkjBagB3Ps1rKsiipZ5mTTaMxmoWJB6Q12BWVleWoEUKCNjVKW/NVEM7lAtYJfPL/pe3PY23L7vtO7LPW2vM+853f/F69KharijWRLIoiKUqiWpblKZZnuY2O7VbcaTsG0naCht0xEgQZELcTdAI7CdzdaMdxty1DsizLsiZSJCXOLLIGsqZXb3733fnMe157rfyxzj1FAwHCP5QLHKDq4VZdvHv
22fu3vr/v9/Mdn6JXClbgK5SULBbzdaDvPHF9fh85V9Q7nYTBYLBSuKecnZ3h+z67u7tcunSJwWBAHMeMRpv0ej2n2mFodImnnP+1blzRwnnLz/k97Vx1/UEf8Pl7ez5o1nXtDkZ1Ra/Xce+thEB5GO3sRqZVlEWDlD5F3tBqqEqDNe5+GCfOE9m2Hyjf579nx4yM8FSENYIwSFfrf88hyKJoNUieD6YuILlcLjk9HfMn/sQfZ3Nzk7LK1y1S56v78+Hb97319d22Ld1u10HuheJs8tgdNIXBkNHaBVZkXLg0YPdCH6Ql7aeEcURvMMAACGi0Q4Etxjlt3a4ObpO1RcE13ymW45yNbp+akh/5kR9h6/JFTFv80APlD+2hTJ4a2CsXL5EvfKLmLovFgrhOaUzj4umNpD/0MbZEoKiqFtOK1Q1IokSAVIZlXtMKQ3Ea8qf+Qp+/8jeeZVpk+FrSG23xn//Nt3n1jWP2ntzECy2SLio4obFTpDTUZYvAUNegfEmxCBhuCmYHIV5VEduEZFQQX1yiioCNmz5pJ+PBPcFy4qb4IA3QVZef/tyP8Wv/9iv0L5/SNhGirYj8FrWl0EWLEjH7bwV89o9F3L99yqvfbLl6OSIIFY2fEUVQLeD+bcmly6vwg04QskYEFi1rerHg9rd85o9ahDW8+OOvsPvcZcYnM47236OTWIop5Mc5k8M5ofSwAdS2RRiFH3o0wuJLSb4osE3sTpbKnXJcqXxNGkqEqlEMqfWUIHQfxGtPJRzttzy6U+KHhroUxFGPMIamqdjavITyPB4f3OKpjzzBrTv7zGYLLt3YpjUKW3iEuuRssWC4e5GzkzF+Ijk7XRIqS+VFjLY8qoMzotSgU0tRKjwjSdIGEQDapx9usrf9El//7he5fO0pPN/w+GCffFGzt72F7Wuq4xnd/iWO5/tkRwtGvR1GOwP277+HCgMu7VxEez6zyWNMZaCKWZYT8DcRxRiTBHw0EAw//Ale+bEX+H/+43/Ayb5hc9jFekuK3HL15g6nD+ZY3YW4pDvwqTs9pMoYNJsUJaSXN2nKMx6/fxdhCvIsIvF6LMolRuE8S7rFQ4InaS14QoOVWKmxTYjnGbYHCR/dzPnm/YQsrfDxWM5yNrY61E2MbQP8eIFupyzGIb1ByMmhUw0vXd7lievP8nD/AQ8e3qM7rEnTLRYzSV6N6XQ6WJthjYfRFYHvuIzL5XwdxrFScPnyZU5PT8mzimgFn67ywvnqUBjcDVP63geq5Gp16fu+68P1A3Qj6PWct1DgE/oxW1tb3Hn/bYSoSMNt8jJDG00Y+igJbal54soNJqdnHEwXGOuhbUnaD2jKin6/C3gc7M+QSUW/k1DrnMEwYTmDprb4gQATg2ppVgl1N1y1XL96laqqODo6cuZ6ISibcjUYeVha2taFM7ChU3T8FkvLhb1L1HpGXixoKp9ed8BstiAvBEjLT/30JxmfnfLdV99ia7RJuJHSFBXSfnAGP3o8w/cF/UFC4EnSjmYyPqVYJFy+eoXtnR2++KXP09vwKQqFaWKirnBVh9Mpxko+/KHnqStNkS0oiwVSNSwWGZ5yypRTm+26VlKKgLATUs4WXLr0PJ/82ac4XdxC1SFp9wJNkeHrPl/98m+i2wwrBhRlg6BBaEUYJUhhENTkpVNDG+1WxNlyifKiNWLn3Ifbti2Hh6cuZR1FVJWzHgTh6kBiBFpDVdbO+L9adTsP6wddxsCqzcjHnqdKZUvVVPheROCnLLPFemhxCJd2vXJXStFqu9pyrbxsul37+MqywgvkapXo1LWi0s6b7yer+sQ9pospZ6dzut0uui7Ww7vRIWHYYk3AcjnlmWefZDYt6fS6jCcHTCZnvPyZTzIYRLz19uvMTnyaXNLoCYGf0Ot1qIsWzxOUzQlxsEfakat1v+Hho7soqfjMZz7L7dt3mU6nRMEqHa8ALIts6QDgBup2lQy254qYIvRC6rpcHxhdgrhep9u1NoShj7HOdnCOiKrr2nnklnOq2rGFhec60JVS0Fra1l2bTi2TdDod8nyOaWuk9Ggq5/1s2ppqNWwlQYivHKweyZqHu8yz1UrcqYFpmqKkz2xxSL4weCokjGp0Y6iLCumVq1khZJE5P6KxAi/wiaJk7Q8/PwSfq+bWWqJV28x5Wvzk9HjNQf2g3hGKusLBEz6oWtRas7W5jVKK8Xi8Pgidp7PzPMfz3O+42+1Tl8UaV5QkHUBSVhUIQ12XJN0E3zoEkAkk/qIg2h4ynRf8yZ/8E6Tblv/hv/9VLCHStyhZouiCqFb/3xjbuEBkqwRGGGgkaTRA+JY4DpkslsQDaLQCW5Okmsm4pdNLKeuWYW/IbDJnPnW4NGEdB7SpW6w8T6JrrIAw9JGeWA+vja1J4j51ZSjLnLTr4fmW8a3iDxZs/um/+rzd7fo8eE+Tl5rs0SPKfE5pIApjyrmh0w0JwpaqLmhq19IhVvdfz5M0hYemRQQNk8eSf/DfbPLH/1LC/mOFZzMuXnqOv/ULB/zKr0zZvipJkg5tE9CKBXCCLzXaaHSrCLyS2QSELxlt+hSZR3bsE1Y13Qsl6XZAoDW5Mmz2h9Rml488e4UvfOGL+Klre2nmfcbHCfH2CXuXa1ppaDKD8DWDfsA4E7z+mxXXnvLobd7gZL5POcvYu+AT9ASB0gRI3vy2z8VLLf2hxyzL8XsgmwiTC8K4Jbc+7/+upVMFyG7GS3/0M+RNSydUvPH2V9i4ajl7PaaaNFRZhfE84jRA4lM1OSry6Q26zKcLmiLHtuApl8RNIh9jNKNRhBFLlBJ0ki4QsVjMqJoKU8eEfogXaMqioc4tW9sD9vb2+OY3XmdnZ4taLpnlPn7skSSgrcIKQTcM6EeSvGy4e3/Ck5efRPuGd969xcWbe4goYTF9gFdLikVNPPAgzikz8CNBfwjFYcynPvUZ7t9dcuvuWy6AZkt0YwgDd7ILowgVNRipacousi3pxYrBaEgmFpwczlFGUI81bKdQVijjg65BpWT5mJ7t0A0ifu6v/DXuf/dr/N53vsJoo4fwDUKEyHaCtBfYXxzSmgKaEL+vKdqGTrpBvSiplcTvRkRaUp0cknoj5osJy8IS+z5V6xoiAs8pRrXWJH6ICobUzQzllSiTYrQl6Qh+4sNdXn93wdyDfFkiTEB3YFksC7Z2hyDmLM48FnOPILREccD29jZNbZkvSh49esgTN6/yiU9d51/88y+yuZ0QJi1Hj5wCI2lQouvSjnXNxsZwvfYOw5BKN2TLgk6ni+cpZrOZU9OFe1hbAY1pCfwIqUBhV/4lQZp01+vs1kqGwwFnkzFhGPEL//Ff41/+4i8yOTvEDwRt5aNNgx94lE0JrWHUHxCogEHaZVqU3Ll3n9H2wJnQm4KqtM7g72suX3mSIGo5OjpBN5aqdp3GnWSD6fxovYI89xq1bYtCEIRuvSqFT1ZmSCnWyB3n5VoNJa0CU6E8i5IuZZt2JVVVUOYuEZnlFSCp25pnnvkwRbngwYMH9DtDLl++yCJfsMwX63vidF6xu7uDbRsub1/i+O
A2datZLNwQqwJoTE2jBUokaDRCGIS0LJdLwqhDWwuqvCKOQ6JQURZ6rQClnXjdgNI0DZ1Oh7pcYJVP7PtMphk/+x/+IS4+EXDnnUNOxhOGYUIz93n3jXfp9RMm0znSC5hPpzzzoecoqpzx2TFCOAuSNYJ2NbGZtgXxgcpYVZrhsEeapkyn49Wg5zA4ee4g4aPRiDwvaZrzVpYPuqOd9yv49/yUUlhaben3h6SdkPfv3OeFF55mMc85OZmAFSvguBsCzhWUc2VWa7MGp0spaZsPhqlzQDWiJopCdOsU6ij2yLIF1goir+vWz9R4yqebxhTVHKMVAo8g9KnKmjRN+dRnPsa3vvk6y7wgTfpMpsdIz4W/dJMTeH3H71NuVTkcJQhKTg4LNvcs01MXogpCwXLu4Nhx7LNYuMpTzwvQdQMYev3OKoT0gbJb1B/0Xfu+a1OhZR0aObe2nH+PS187oHWSxigl1ocDx4ecoaSkrjRp2qOs3PdZa1kuMkevCBVSGrJlTb+3Q9NUWOtaYaSICSOF5/tMF3MAYi+i1+0ymY1ptAvfIv/9gNRotLlW+OqyWWOPympGLx2598OO3YfKur9nUbpwStrtudVw6z4TUom1V/rcV2yMQckPPMdOZFHIlfXj/Po7vybPQ03gZpdzKkCapgjhUxTZignprf2Sxug1lklIqIrGNZ9p1snz2WyCRKACi2lrGg/6nS46M3zqlU+xP59y+413wMsRNiEKFU1ZoU1OGHfIiwJPxUhbukG1dfe7OIjBSqLUY1qXRL4h8Cq2r6U8OJrRGElISHYINooJvJYkgdl8RluD7zlMlVRg2pUVTYLBEscOIaZNS5IkzGeu01uuihSa1tkNmuP2DzaU0+pHRKnk7Oge3UHM8EIXL4SRjGm1k2rns4KmaXErBLXi4bkTk8UHZVBSE8mUQBg2vc8hZ8/T8QNGu5tIL+d4+piwZ4iCIYE0RFFNY0F5Gps1+EYSp5LIB6/yoUrRRURgh6RDn6aqEJGgrEsWdUtlYx6dnCEHM+ZlgIp9jNXQaqQ9RbSHjNIU5QkaWePHLYEOmd2WjLrwp3/+ebzpBuPDYwZpy3DkfIFpF/AFdT0i7hvG+xCmBj9UKB24k15SkJmara0Qj5zLT+7xf/w//Vfc/vabfO1ffYnsQPH0M5+gaT1kUDJbWFQQIyWUeYW1Dd2Bpa4sDx+fMZ/X7F68wPUnrmCMZmszRYgST7ZIWRKqLm0Vkvg3GB8Zup2IQb+D50uENAjboGRNt6s4PTnm+PE+w16I1RkqCrFINre6IARh0GNjtMXR8YTbd6ZU2qcFlm3OYDQgjEImj445uXdA4nVJOn3yTCJsh1BdI/RHgGFyLJkcwwsvfJLhcIM6b8mmC3w8fAI6SYi0EmNyJvslk0c1k9MxpbZsXtjh/Qf7eL5b+7S1wkt9wtzHthE7e1063R5NWZL4MYWuOVzM+cX//r9m//Xf5BMfusaNyx8mXpwyPsm5e2R55+Fdgo0B6c4uFy4N2Bnt0vVHNI9rbl64yoXNbfwaYuuhiCkySWQjehs9MC1pGK08Yh4i8AniCIUAe4LAYBtJlBiiCM6mGf/6WweceaAbgRDOW9pq2NnpMj0tKWYRSdynaTP+1n/2n/OjP/ITnBy5mrFaT9jZ63I6OebX/s03GQ57GFuBUfhhS5FrBAl5ka08TC2TyeTf8xnpqmbQ69FNXM1a0zS0WIIoXD0c3U2mqiqqskFrh7MQfID5CMMYsBweHhF4AXlW8o/+0T/i5OSIWhvms5xFmVM2NWVds7W5QxzHnJ5NaKzhnbu3ef75Z/mZP/wZEIY8c1Wd/X6Ksc6bt1wuuX/vkPksQ2tD4HVdHWQxp98brtem5yvscyyHkh6tNiwyt2aNk3DlO3KDSBrFHyTBpTPoCxyTtqlblIxXSoYiDhOiWBH6kOcVs2lGmsY0TYXwA5eUr+r1KxUCky8p5kvqxqKiDvNFThBKkIKo45Okvlv/C0m3F7G9s0lRuJu2ryQXLm4jRIvAMegMBYaMKLEgKjzfrc+SJEE30OltoITAUiI9w913Tnnz60fcevMY3/h0OyFFdUKYWIxpiGJnQ4mjDrPZDGNdmEMqRVU21KsgSJp2UdINI1VV0OkkbG0NyLIFZ2cnSN9D+t6aG+hCVzCfL6lrx5889yt+AFd33c8uzBCu/zuEYbmc8+DBAz73uU9z48YNxuPxOuRxHq75YFB06/mmqdbfAwYlWA+qvV7P/blwnuCirsjKU6zImYwXLGYNplUs85KPfeKjRGHMhQtdwkjRiy9Sly1KtSjpsbN9FYTlvTvf5/j0FESLFTkXLlzAszVe69GPN0hCha8spoaqtGSZC9PoVYVlUThSQN0UWJpV9aTrIe90OmjtsDDD4QZ15d6H81q+8yDM+VfTNLR14+r0+GDtez54n3sYq6rAYjCmpm0bkiRy6ly5IIp9oiig002IopCtzU2yZe4On5FDFDWVZDEv+fGf/CRPPr1BrScYExEFmwRBRJDELIvcDZ9qxUBcWRyEEBjaNeoojp039+TkiOl06qDtiwmCmqJ0KDfPT/C8IUoOsDbEWEFVu3CK8h1j8wc5uufrZyEEs9nsg1S/rrEYRqMRYRiu6AaOUiCQsKqhFUh009Jqg7dKpVsr2NjYcivhecbW5h4XL1zFGsX29u46LKPbhqqxBFFKEPq0tiVJfYpyjrEVceIR+RIhFCZI8JqA5dTZf77x+uvcv3sLT1XQKpcnIUCGProNKHNB2wjH4IxCllJgwggBKK3p9TvM24K4K+ltR+xc32Q4SlBVQHPWwQLXX7rG7k5KawrKsmRra4PNnT6NKWlbg7Xx6rDtQplBoJDKhRjjOETrmjBw6XhPOiXYk4ZB7wP27v+vL++H/cbdvT9EFRS89JNbHB5p2iqmEVOUKUEolPJo6tU6w5ynTd3No2laqpVXBOVRaUst4Bf/9XfhkkR2ZphbcPPpliLrEcQNi+WYxiqsX+MNTyiXKUMipqcT6Cekqktvs6Q0Gl3F6HYJQUDSj9B+iW1WUFKTEQtFsOzw1jv3wRrSTYPREpFKLj2rqeSMoOzShBpfBdRNg5E+9x9qDr37bDwnmWQZQj7L9Scsd+++Tlx0ef+1JfNHE3b3fLKFz/F+w/aVPmU15vihR39jk94wp9Zj9j6yyUef/RC/8ou/xvzU4Av45ue/zMc+9xJtNaCfhjys7lMLHyEUeBWWAERIkkAnTJhPMg4fzxj2Bgz6m9TVjDhOUK3P2VGDlJbeUHJw+ID5YuZSyTLBaEljG4y/8iT5HlEUk9cNUcex0IRSJB3DvXsPMCYh7ZScTcZYG6L8hrKq0MZyvJiSZF2E0ShhaPKG5ZlPUxeEkVt1tOWCyIQMNze5dHOT/gvXVycfj1aXjAZDpLJUZU7TevhhSL1s2Q0jnv/Ej1L2Xuerv/2QB3c9Ll+7QGJapkXJpesh79yeshOFZCWoZkiYTknMBE9CniaEquLhO0f8xV/4M/yhn/sp/vpf+l/y8oc1o/kJrzZ99vb22H9jn85wA7ETcvY4I4otk
9Ty/cd3uHT1BvOjEu0XeIMN8tMJW+EQozI0rllFCIFpNNJXVFoT4oEReIBHQJEbEqvYiCzhYJdymYMC5bmOXSUjqrKkyqFYGp79yIhOP+A3f/Pz3L59m6OjMxdiiDXLRQnSp66EA6fHgkxDJ+2iRYLnQ+Apau0QHVVVudYUpZjP54BkPB7T6dREK65a2zZkWUYcp5S1e1+sNoiVAuXWlOBM4qB1hVpjWRTdTofWauTKm1nXDdHKr7gxHFKX7oTtBT7HpydUVctkNuVsMibPSkBhdcPW9oCr1y5ydtZwfHxMnPh0O0Om0wXKq2k19LojLlzYYjKZU5YlOzs7ZFnGaOQqAU+Pjt36X4DF/b1uPHGZ+WLBcllgQkNV1gSBj+e1zpaAw88sFhm9Xh8hNUKC8hRBFNHScvv99/nsT3wKa1u+/MWv8/D+Q5QwbG5vre+JD/ePGc8X6Krmte98l+tXtvB8d3BotSSfV4Spx7DfY5EtaStBTk6vN1ilfQumZ2M63YSmqlcrXR+sU/Ga2iktQaho2xpDS9W0pJ2YcpGRBIp3v/M9wkhw+eIm+X7JnD18aUiSGiGgaWs8YWlFy+nZAVZAEjuWapp2MFYwKWYURYVZ+e7O1a9+v7/uiV5X+XkeeVnjK4XvKbAK5QyIVFW1fvh3u13HrSwywjBcp3Q95daSk8mEJ596is3NLf7dv/t3YD20bmgtWMSKh6jWQ6haefUW84zwBzAx5x63sizJspLQ+ESRawsZDjeZTueMNrpsbg25det9yqXHm995E0nLg9sTuv0BV29skOk5QegxP3U+wVZoHj04o9sP8GTCclHQ6RjCSNJULtih2wKlLHleuWss7GJNhJVLHj5o8FWHpilAKKf64EQea+3aE3euRoehC9I0bb0exrtJSl6VSAtSCaq6RErWDM5zLFbTiPX621iXbPYDd1jMMleZmXZirDWrgdcipEdRlbzyyit87/tvslwuieKAqzc6vPnGklYPaVtJlAxpihbpa4wt0DpepeAtvvKwxjCZzugPu7BKm0dxSKNr8twdCJxFRfPw4X2UqvDDXUzrI5VBejmoJb7X0OQtxhqsdavowWCA7zerlpt2rXpOJhPH8VwFnJbL5foz6epKw9V1cd5Yo1YEjorzdhz3Z/5KqWxXlomSonBNfL3egMFgROCHDAYj8nzJYNBDeJL5YoqRDZujDSbjOUL4lLnFGEVT5wgZ0BkmoMd0t0Pm0wnzHEIihKepC0USNWRFgUAz2t0j08dcvdbj+I7ALEpGXoRuW1ePbFryyYRYgl5W5MRUYcujBwsMPp7JaRZwZ/k+F3Z6SNVSN5LZpER5gk4npfQqqjJHKQFyxcykXVlIWuq8BATOmeKA/9KHNA3dxumH/PrhV95/+edtNj+hKApO7i8oizMGwRxbLfDbiKKxVAVsbId4QUWRSUfVpwarQCqkNUgRoe2C6Sn8j/7sJn/7v5QUVcSyMAw2Ev6TPz3jdC7p930evT9jY9dgfEVlM4bzTV566SO8fXifPJ/g+VPnYVt08LubvHXriKuDGeGuz3Je4QWCKFHEBprpNsXSMsvH7N5I8eKMtmxBhYjYKacilYQKoiDg0a2GhXBK0/SspptCawe0VQbGdRRnxxp9qkCvek0Djxsv9rjxvOHtr8DtNye8+Kk9+k8uiTpbPKU/zb/9Z7/C1edu8ubdfRaLGbEXQ66Q/RlV1XBpb4fFImeeLbCiR2sK/ACsCdBVTS/dZDqdMhwlIFpkC1K4QacsBL7veH9YhR+4GimnqPkgCuIkoK5aZvOK8NxrJBWtCDAWrLDgC8pqBm2E0DG7V0OU0Dy4M0EKn066QdZM0DZHtbHrwQ1KJAEq8Il7lk7scXq84LM/9sfZ6j2FCY54eO+Er3z914ijAWVVEXdDyqohShPy8YILm9ssg4rnf3zA+986oT/ocDxv2dzYZfL4HTqDmPGDgAtbWzw6uQ+th1CSD31ccvxIMr47o5QQ+jVPXn+O+29lvBA/4pmXKuZZwL95NcL0BVvDhuGFj/DgziE7HcWj7Iwiz2kqp4KFIqJYLBFBB2OWPDG4zv7xfdCGxlhaKaEy5GWJHwYoC0YEyKZAKEXhKzzd8qmXL/Pm7TH5ZInwNE1tGW2mdKIdZot9olg535dMORnPCbweL3/swxjd8u1XXyPtKrJlRb8/oDtSHB6e0e/1kaRMJ4ek0S5ltcSaiiiJmc/nK76d48DN5/N1CrLf7bHMM5qV6iFxqqQfhlRNQxo4tpvy5BrUe66YuO5mB98Wnk8QeGvYdhTFZFlBv5uQZRl7O7tMpzPHwJvNCEKfvCzxpCSJu+RVjhIuIBRHPjeeeIpvfuttvKCgbcWKs+ghhFNyzjuNnfHernucz9d+5+svawXQkqQBjV4BvBsoioYnrj/B/sE+Srm6NOVZPN+yXGj2dnfxo4ps6RSxRnvUtUvCzudzpHDYkzDuYHQF9gPVCBPSmoqmKmkrwydeeZb9w4ecnk7xVJ9eP0Yoj26nx8nZQ8aTksas1tsrXxhW0jbnfjifZTbFWkEcpWh9XnO3ChmFIUXZ4omaKOxRFfmq2i3g2Rc3efv1A2Z5gBSGJLYIqen1+mityfMcrFsLDgYD8mVBazVpr8t0Mkfh1txb2yPyPF/71c4bZvwoXIceAj+iaTRt3axqBZ3Cdb7KNMbgex+Eu6rK1dZFUcSgn5LlJf1+nyRJePDgwVohNq1FW4O1BgkrpFGLkh8s0nzvvCWlXCeMz/1fnueBVHS7Kf1+n5OzCWenE37ypz7Nzu4mv/3bv82f/4/+DP/yf/gNjh6eMOj1WC6nblWuIjppQG1qljPJ3tWUyWTifG1hH8+bkWeu9zvwE6pS0+gaTwrquuXCxYtsb2/zjW99lygRYELAc13ejbNSeNKntSVte94F7RiaTeNab5bLOUnq6kXrukav/KftasB3lgG1HqSTJKGua5pGI6UbpAajaO2ZDIIQ3TgPph+4UMvG1haTyQQpPZaLktFoxM2bN7l9733Ozs5IIp9mZV9COX9wVRf4Xoi0imWxoJd2EVZi2gZrDV7gUDpNo7HCrpFV5wcQa+2a49jtDqmbDLXC1yVRTFacsrWTMj1pyaoFQRAwGAxYLl3Kv1lVHTpfrlqvttM0pSzLdYjNfZ7E+mcHQfQD8H5FmqZ4nlyHus5tEwB17axMaRo7RJCVxHGKUoosW6A8hxz60LPPkcQh3/ve67RNs24OOh9Ie1uKbKnppxFbuyGnQY0+MJixQnkJSz1zjWOqpi4zdNMnGSm6FyWVmrP/miCWNcNuSqUb18dNiC9ivFiBZ6k9SakmjLYuMT44pa0z2sq974MNt8koShc48zyPMBLEqUC3Ocf7zeowAt2OsztkebmyDzQEkUIqu2JiGszqEFQd/gFzKB+99SbbI81WpyUp5+zuRIx6XYyGSlfrdOc5KFRK96ZjXZdwGCh8KV0FlA1RScwXvnTK6azL6XQLZVtsMyLPGpQc8+jhIc997MNEPWhz49aJfsPH/oMKoRrOphV5WzObCW48cQ2/jOmZBCEM
ufpVZ3KQpBXpVoo2R7Z53eYBPbctGUNNsmli2x3YCiyEnSKUVRoKqZnlCVBnE0uPd6wLbtWQILYDsz/aSwKvI4x7BMgkadTEcUVUaWlyhTUyQVWAbDwRhdFSzOz5GpklIVkEss12b35ibd5lHyEqosJMIhM0xco4E9J1FpShj1MesS1ytxqoB+f0Q0Fcx1V3Aci17vAJSHkBpEge/XybIKz8+YRAlCCPy6wKsJoiQlT0uyvMS2JFmWYFkWWguUAsdxybKUKIowLVCVRkqTwaBHs9lmMEwIGgFp0cMs2hj3NLyvfP0Sx86u8a3ff5wrl/scO90gHoWsHV3ECVJKbUAVcOJUwHQYs7+9QzqwWWp2id0xBBmTITz0vuOIesK731gHYq69AatzZ+mrqzi+TRRZFElGbhhUJrjdOpUaUpN1Tp3qsrU/YuFkl/ufmmNjcxud2EQDxcLqAkk8plQVIp5iuy5ZluEFJpqE7Y1rmKbN9vpNHM9B2j7T6ZB2s4nWglrQpVZvIsyKUkmyGLpOk858h0EKW1f6mL5PEZfklcZwCkpVgrLJQ1AlnLl4kSc+8Umm4Rh0isoU9cUaptPCLLv09w+4+Ilz3Lou6W8UXPygzVc+/TsQOLxz5UXkNKfpNpCpxdn3PUL7RItHLnyQKi8pqhDQVKXECepE5QRTWBjCoN1ssF/d4a/+tZ9gr7/LJ3/oR9D1FreuTXj4wgcw3T3ygeLCQ89w8/otFptnGU/3mJ9v8cZrE+ZW6pjSZ3n1CEJDFG9ysDuksZhj2AV3t12efvqjHD1p8MUvPUueB0yLnBOnzqBFSeAGqHyPt74x4uIjJ/7sG8qjJ1r0NizGhxFx1CeLQwLLYzyesnXlBo4hufPOTYSULHRNpoZCKY2qDFA5QiiKzMI0FY4HWaYxpIGmRKsZViiwTMIwRkmFRJCNQ7orTYZKQVbQaNSxfZvtrZjlVUmr7UNZYpQeWk1YXVnCMCwSlVEpk4ocoRqE0xTbmWVsV1VFluc4Xp3jC0tMw5Qjayv09g5QZYmuFFIDYgYvR6h7uCCTOE2w72kI+4MdhFRYFlQ6BWnh1hRFXJFV4PoWeRGTxFPOnjzF4WCHWsdlEkYY0sexDA52ezS8Oodbd1CWTzQN2a+VvDi6zmQyIU1T4BDbtml1awgh8PwaWZKzt7OL1JLBYAbodSybNM+wHIeyqhCWiUJToFg+eoQ0zOj3++hcY5kmpmnOkCJFNcsnVya2IUijgrxKsPApk5IyyrFdj7KS5EkOaPb2ZhOL4zgopfFqPqUy0JVib2+PSpW4pkklocoK5uY6hGFJbzplbtnn8eXT1Esf15rn7mRC+8RxrCc1tz43xY/HeLXPc21zlzSCxSWDolzAKnMMEeIaLbIoR5cZkwMfhIFplVBJOt0mzaMVO7tDTGOeigS/4aN1RZanpLHANXIMGyzZJBpn1Joew8khgdXFdxqs76zTrC8zGvYIah6WIUjjhJXFo5hGQe/2Buk0430f/DC/9ulfh0rhWQHNuiZLCyQxybji3Kmj2EHJ3EKX4TilbSaMDyxGA02WjjGYR1YO67eHVNWIhx9f4/a1nEk5ZNHz+OrzrxGOLB566FHubr7MIE7IpvOk2RTHNrGclFLlCBGwuOqzu19yZ3sPL7BJ9scIL8RerHG2fYysv02Uh0jDZTzNOXbfGlbUZPv2Lh9634e5dec2J849RKd5mvn5XZJJzHd87BO8+saLmCzxv/zLT5H2C7zFBukw4dWXb1CmFq26oDfIMM2ETs3n9ddDLqw+xhOPTdjbHTKabJHsbTDcEOz1DpGxyeLCMv6ihSpSTi61Obx6i9WnO7y7E2HX75InY7peh8P6lP39dU4vn+HN59/COjfPyWMr9K5cxy5dwsOSgQiZiyb47SblNMIwEvb3dlm1bVq2zTgacN/KGl5kMt3a58f++sf41d/4A+JhjBCaPJWEvasUWcZrb17DczKWpcOVP/o1llTC/vWrfNuP/RhzS/NMqj32Zc6dePO9mlgYJQuepr8dkwdz9PJ9hrcG5K0EY+zSaK+wvHaBrzz7R4SxyUc/dpGN/AZ7tyKEyMC00FVIuxHwn/7LL9M6dQTlWhh1m7//z/41htDcHu4wGSXcvHuH1fMXWVg7zmTzCr/z73+O7/rUT/LHz36Bb3vme1g7fZTteMjZ84+zsXmXI6fO8w/+wT/lX/6Ln2FxoUOpcqqyQilmGkchsEyTSikMNIY9M9QMNg+p2Wt4dR+jskErijxlFEU4poUSkjBJqYpyRsxQYBjGTMaE/pNGVRu0Ow1M2walieMZzLwsNbZrIK0Zdk5Il3/yP/8vNFsutqsQtoupFK1OA4mc4U4CKFIL2/JJsxKwqHSO61igTPIiR6uQPBe4rkuSzBqyOJpJe2rNGoY5M1C6jsFgmBL4NsKWmPjYVg3b8masSCUo84I8T2eGSKFxXRfPaxKGId35FmWa8M6lr7B24gSFYzNOprTqXYo8p+61sVyXxeNjbr0bM54ITEpMy8BzXYoiYTwoaTRqhOMUwypRBYRhSKvbwLQK5hdmJjG/FpDEio5fJ4oLzHwGjnctmzhKMQwbQ0jyPJslulQVVWZjWhWFTnHtFnu7Q06cXKEUfVqNgP31MUI08eoJXVtRVkOe+0M4fnYOdA7a4Mqbuzz0+EmEgPW7d+k8eZzOvI9havZzxe7gkMPtITUvwJAJ73zjJuN+iGn74Eg6rk3dq5hfXeOtS9ewhItds2jMuexvCBbP5jS8ZbTUtB7wmLwy4eB6xamzLu974AEuvXyVg90eztEjPPK+Y+SWpn9pwPr24F7GekVZVfi2jdYVaZyQFiGVAMsKmAwLTFOCkzKdjFDAyTOnuLN5nTiD8533c7IzoX/1v1MVGqSgKktELvCFRygqlAmBbTEe3OWF5z+D31rAderMdVdRxiLalKhsRNPpUu8+yMr9LR582qdlNjh6oc8kUvS2t5nEGSfPHOcjH/9W/sMv/kfOjr6dx+9vEJbboGtokbN+cImFuROsLZ9mmpuYpmShc4zBwoBJ2MNq2Nz3wKMsH13CuFVnv3eDK3e+zKmjH+Txix+k3nyZa+9cZbhvcuHceZrNfYQOaNTquK5kOh3RbnfZ3xkwHluMezC3dox3N17n2ZdKHrjwOOM7G5ThHuvX7uC6CtdpcObC49Trl9ndH/3ZN5R3r98mG0Dcn6BVSTtoE0djfN8mjELefeV1XDNAVyVuXbGwtsr1G3ewDAdVFmBqlBKkaUmtaWNZs3QFhEYLheu65HmObZhU0qTKcso4JRxPqCyDtChZXjRYOR4QqohoMiR02wRNC60UgddAGgZ5oUAUaFFgmAoyF8dysBxFHCVIQyCkJIqmRGmFEBZlXqDyAlVWCDVLYvhm5qyYiSXRegYf17oiz0pM4aOM2WufJhFLK00M12V12WLjzgFx5HLu/ENokdDwAo7f9wBvvPUOUSaZ67qEkwGGLRB2Sa83IFU9JJL+/h4oxfLiPI1mB69uU294XLm1S7czR5yltP0GtbrLsD/7Q0+nU7I8Iwj895BGV
DM9U64q9vb28Nwac4sLRJOILI2hUghTgNQUqqLIc5QAUwoqrUnTnIbr49U9JtNZOkiW5RiGREtothsIITg87CNtA8/1MbVBPI6oNwLmgia5GRKrlHg0IdeSSuUMp5phy6CpPa7t3+ZKfIfvmf8Qi40Odx+J0d0J97UCeoNT3L7ao566bN2ZImy4cP8ixaiGyjxu370CRkyz5ZJkCWbZ5MQFl+U1kwu6zZe+cIt6sEB3wWI6zpEWNOsNqnxKPeiytzOliBLiScbP/buf4Z23rvIr//G3cByLvCxYWp0HpVlodtnf3SSoS8gFc602vUGf1ExZ7KxwsLVNq77C7Rsv4LlqZrpIPF544QVOnFvhrct3WTmb0PYDsomgih06rS7jYUqzLRkNJA89+AA1LFYX9rl9U3Hn7T0KMpRyuXLlCo1WG8fwydIRrWYLpcYUpQVFh8pOSRMDnWXkdobr1qHMSA4N6g2J3dkhmk5p0KGsBLbpsL8zJE4jLOlw5dI7PP30U/zg9/0kP/vzv8yDD97Pk4+fp4xdfuy+n2Qw3uedyzuYNY+r727x+MP3017LGR6OefTIg6TxFuF4SNWK6R6Z54++8g3On15iaTUgC0uaHUH97ALL6X3c3d5nr3eIs29SZCaXr29S93zmnnmS8IU7yL19fLeNaY9oGRP2Yg9TKeYXWmSlwduv3sWxDALbJQwNgrkOg+09qskCQWOe9tmE8N2UY0fmyCqfSgW4VY3OkuT7fmiJdDyA6QHf/thjLLTrzK10WOvO8ZkvP88rb17lX/0//gELbp1TT36Q3b11br35VZSriUgpkpjt4S5V+SfYoMceepI7N99mb3+fzuIaK906J06d4stf+yqNo11WljyuX95lsLeOmRtM1z22Dw+wyHAyA79WI6wstM6wpOaPfv03eOqZD+HUbOIjbVp+h5OtNb7wh7/LdHuT9fEIs7VEfv0KrWWP8VSytHSE0w8fZeMwR9sGUTEmjmNu3bqBlg6PPP4E1y69RuA3sG0LLQvyPEcpdY97O6txQs8+d6XB3es7LJ2aBy053DnEFgY5epa2ZRrEUYY0xD3kjkYKAXxz8DYoq4ogCLAtlzgO8RwXIWauasO4p5uXGpVlnDx1mm63y41bb5NXFYG/RFVUDHopZ06d5vTJnN2dPlWlKPCQoiRNCio1axqETvEsidIFliFnYRuVQsjZBq/V6hA0AqbRBGGYaGHS7FQIAXGe4AYtbDfAMME3EoQ74wCv+AvkZYGiJMsz4smAVq3OVCcYHqz4NQ62NzH9BtJUpIMhjVYDx+gzmfZp1hapwox2B8ZDzXis8AJBEFj3Mp5zDLOiVguoSo0TKCwnBdGlNzzA8wIODnJUGQCCIGhg1xLKUpNlGX7NwzIcwmmKgUAoQZVXWF5FURRQmqRJgWlVtLoue/uaw4OMo0dqDKIRK6tnmQ4mM7NWPuP4en6DJIow7YC9zRHTcMDSkXnWr03Z3Ogx127hGAWV0LhOxdycwfWbI6pcUndMpJ2jDI+DXp+HH3mCn/kn/5Cf+OG/xvreVdoLJY7rU6vV2L4xImlNsBcEt66E3P/Ice68tM7zv3WZ6WPHSfoSxwOcArddAzujs7rAsFcxCSOUAMusMGyLMI5mFAGtqPIErQR5pVCGoogSRlVGqQymoy38msfaw+cQfhvb9GnNddnZXcd0bEojx5IehZ0hp+XMGb5Q58AeUW0c8LHOx/jwhaf42u4Nxge3OX3qaYzlNsPxNvHWXTKG2CsfpWau8hN/7/2sv/kSv/Mbn+Ev/U9/gWd//8ucefL9/DgZX/6NV7l8+TL3feAc4ajEsWucrl3AEi2uXn6LoN3CXJjnyU9+gie/5Vv4+X/9r1mrrbC7MaR1dJkkus3e/jaGtljfeo1Cb5AnBaUZMn9sgY3Nm0TFXXzzYZI85Mbb36DVapDkbZbWAuKojXC2yQ+uU+kJNfsMjXYDp1hD3amoL+eUKTTry9y8FtKsr2GUxp99Q+kUFqsnXG4yBXOJaZxSYmBlkq4fYDQ94jhDFBXNZm0WpdSokacS1DdPIjOnY1VIHNcgSysMMevI0zRFVgKpZogeIQzkPceg02xgBQZhXHLtSsjqWgedOJiypCwDkAkSjzhJMWwLAwkKVJYgygrfkeS6IgjqFEVGpStMw6DR6uK6ATdvXsMVFlSKmQpIz16v1EgxO5WYpvmeYcaybMpcA4IojVg9ukxzziDKHOLJkEa9oEo0jhtRqxeEvTE7m4rd9QlLR5aoSHHrdeyupMxMbFvgWCW6Kui2j+O7NuPxELvVBMvk1u4B0qxzMJgS1OtMkwyhodHuIKVk7eQp+nu7bG5szwC9fv3e2V5jiFn0WDieEE2mNBsNRDVrjGebTIOg5pPbYqYXKgoMW+JLn0JVoO89KKTEtR0MyyQtshnWyLVo1OpIaSKEJElybNPDsDwOtvZACXzHxbctUuXQ8edxdMnG9RuoTpegU6OYSg4HCS/96qvITp9G5HH1skaUUx4+VefNjQHOXIBTQZqb5HlBkSk++QNP8vxXb8yc4rmH2ajz6uu7ZF+26Cz5NDsNOs2AwWGfLCmYOzqPUTns3j7EtiSeGVD3Ciy3w8btff7oi18myQZY5jLNruAT33mR5/5wA1O4eJ6D55hUKiOOtgnLkpV6m9HhkDwqGfT2kcIkyTRIjbQjLlw4wnjSIxmNufV1G23N6AW26dM/TFE6ZX8P6o0OQhbozjyPPHiCOzu/y8LCEhubtyirDCMXDHqgSp88jdgZ7dLpNJCmwKkl4KQIaYNwcAUMboZ0Wg1MQyDMgEEvJRoFTIsQZIVlC9xCYikD2XApcsU3XnybSy/+DO3FOpOpwZW3b3Owk5AUJStHW/zuZ57l3GMP07u1gV9f4z/80n/jiccf4ZmP3If9fMnJ+z7Op3/x03QaJhcfnOOph57hzp1LLDeO8v3f+3383O/+AltbAw4OdzCUiVNv4Psav2mw0J2n7dax7TFZr0lSGRTmkLpTYCGJyxK/WWe7d4BW0LADYldj1SSjUQ/T84miiDCLsG2IE5PBeIBwI0oiCm1g2wWGJekudvm//fW/TXexhsakP67wl3xWT+3wu1//Xd69eYfTjz5ANR3Sblk8852fIpnmZNk+NBdY0gWvv7H9Xk28/s42eWnz6FMfoKgUDOsYJpw5doxReMDd/TrTsqDbkgSiQb075Mix47x/oUka3eaLzx7gz1to18EVBcl4zHf8wA8wmGjSEMbTgnpdML/s8dCj3086SbFEk+LCcfZyh0ky4MLD91PZAVG2TTnI2ertcd9jH6TV9fnS534fS0scbxYH57gGYZLeq61yhs8RAkMIuHf6NlA4lsfe3Q2kkNj4s3hdDHIUQkNeVjhCUpQVQgmQs42nVrPh+5un7yRJGA4niNbs91WWJZZho2H2vULjuJJrN64S1GzSOKIsMmpBjWmYkCQGrr1GHIXYniYvIyxhzeDhljWTJakCIRx0ZpOXKcKQszS2UlELmigMkrjEtmp4gYGiwvMbMxOfFmhhzqIqqwohS9AawzXI8xzD
NjAQVPksdjaPI/ylNoPDCV1vgblWQj+eRQcWVcZkVHLfmVO4Zp1r715nacXFNKDZdih0QhJBck9H61glSnp4XhfDSun1D7CdBZrt4wTNDvE0Js9jkkKT5zlVFeF4BbmaPYOkLkFJJJAXM3mBaZpUhQJhUakEoSqKQrO13sd0HFptj7yK6R+Y+J6i2RTcvTFi+ZRBkuUsLC9wZRBx9OgKe5u7RBPNsNcjyyJa8x5ROMKsHSMZjFDS5WCaceT4IotHa1QyZ/PGHlY6oj23xl/5Oz/DKB5z38NdugsnePvSDdKsT6JMrNKlyiGw2gz6B+RjG6fm40cFwyTG8VzOHl9hoWPzzpu36U2meLbm/LGHiOOY9Z11Jmk8A/g75r1EPoOm26QiZXZGBiltdJUjzZgiydg+LKnbKa3FTYIjyyzf/wR3Nu/gOAJtGCRVjgtkpomwYS4PqZUu549/EL3RxJvz+cDcg9yea3BYaBplTMMPqJ19mrW6jZ3DQbRHttHEWVjjJ//636A+t8wP/+gx9rfHrD30Yf76mfvoTV3CUYnKFEJbmMyjtMXikQ66tDCET5xrAs/lp/7G36Xh++xPd9ntfYlL177GqeMXOXP2GNt7V3j3nVeYa13gyNppDvtbIE2E6qKLOYJ6mwtnVyizGkKGJFFJyw3wF+pk0wOWjq5Rqjl2b9/Fr2ma3TnCMMcwHEbDCEjp75s4Tvxn31DOXTzGdCei3jxCOBohixhNSW15CbPuI2yBU69oSJu8GDIZTTGFJM1ztFJgSBAFSsF0UlJvGlRVghQWs3o2O1fkYYTSsxisSlVQFNQth7C0KeVMy7ezeYBvd1nsLGAok6ISaFFSWRWVLhClRAoP26kTpYeUpkIoQZrmVEWBYQsQsLq0yu1b6+iqohISkO+5t/8k43z2scwrTFuitUaLWbRYmiXU2w6Pve8+9vcL9gfXafiCRx56mldevMPuwR5LtqTWWKOzdISbdw4YTQbYlkezvkKapBT5hOkkwvYM6vUmkzhhOJoSJinTXJIVOWmR07BcBqMx0rQoy4Isiaj5wWzin4xpd+aYm19mc32TIs0o83J21pYGCoWUFbZtE08nM3yI42BIg7wsiaIplilxPQtlg9t0SQ6LmXalmrHj6rUahmkShRF+zSeJIga9hGajTZJHzK2uMDzsYVk2AoNxr4dXbxBOpyitMfIx/cMBjmdzdn6emmFyc32dWGV84fbXcOsZD4j7iAcF5mDC2kMLHHh7OEGNmm+wdVNz2KtwpMPRMx5+x2YU5xQjgTQgmk5pdusYc5KqmKVsZGVMVSks20aUil5viFFZSCSBVacbmOxOxvz6f/ssZ+4/wSSJ2Fqf8PGnvh2hZy7FJIo5sjoPUuI6db7vhz/K0sWnGN4ekKcZx4+fpNZ02Ny20GqMQUCj7rC7uUWeNfD9BtPpmKq0qTUEi/Mt7tzYInA9oixCGyk3Nl5nJd3i3Zckc75P27OJ68dIqoL51TYH/S2Ghzt813f+EMePL/ML/+4/0V6oIZwJeW4QpwfEhqTp2Mx36yTTlDAbUmUpnqhx8cIRtrfHuE2b27dv0wqaSC3RsgJDsNJZYL5zhN50n6qC61cGGDgIR/PupW1ss8+tm++gTcVrbxicXl4mmF/mWz/yU0z2f57t/Zjv/d7H6BW3OXLsB3jkA9/PYQy7u2N+9uefIwkanDjbgP42qYaq5tGZd3BME9f3SYVNp+kREVAVKaQGeQFYgtIQ9Lb3aRoFEsHkcIjXamO6BlZp4VsBZ4+f54V3nqNjNVleajAa5DgqQhaHYCTUVEFZdBiEGrsBO3tDatKkJCaL1zi22OVTH/8wRwMo90MmVURfTyjKKfNLi9zeuUW70cYxA9y6815N3Ni7y+LyArs9xY2br9AWR7HziGMP3YcM1xkMS6bZENx5dMOiVjeYW+uwttrh/R98kGH6HO9cfRPH76AKzfrmOv/xX/0C3/JD3w+DIdTbjCPB0+eeYnsUkpkZYRXz8Lf9ONO3n+fK88/Tar6f8TTFkx4vX3+ROCq5sr7PxYvncQOH7tFlTj3wnfz+576IUimWZYGWlGWJRqI1M+OfnAU1lFWG0AWeacxMkwiUYZGks8hclEbpCmHP0n/4ZpmUAhTvaSmTZJb37XkzHePM+S3J8xTLcfG8AKTg7voNDFMhtaQW1KnIKCuDLI8J6i6jyZC8inAND1ODKR2qIgSlKNIS29IUWYIUNqDJsgJVzeQ4fr1GmuaAnKXwSANpS9AmtVpzRotQOUkyptFokMUaJRSOYWGZ7uxsHsfkacFco8X4sM9k/QBf2PSzAbbr4JmzAd90HeI443AKwnJoLMPSss87r04pYhe/UWNhpU1vY8DKkRVikZEbKVVVEMUhhmUAJoPxAaYJnbk5ppMDJuMdbM+gyCRJXFBIjWPN4iuFEOR5TlkogtpMEoW2yYsppgm27YIymE4y5hcdpsmUw11Js91FM6G3I/ECn34vJsocKEe4gc10OiWouZiyYG8n4ujRNZKsR5V5RKJPlE6pihy79BjsC0pt4QQVcWzzzP3fzpgBf+1v/xU+9q3v4+svvYMpA86ev4Dj3+HaDUk6Ckl1jbJImRc1Jpt7ZPkEr20j84LSLLh1LaJcbbHXy1DKwK6VNNoLDIfXUGhs36QQmjLLZkYraUAlUXqGyprFjua4jgBsFJpW3WT3xhXuHrzB+QcfQ0YlogDf8plk0xlpBoVpliw3PB4UAStqEeuOR/P0ed78xhXm6k2so5LgTIs0rsCEyhLU8gZRFjKcbnOw/ibt1TOcXDrF4eYE0zPQUhH1NJlZx7I8qjym6bdI8gTsmTSsWVuY9T2pwjclGrAXFsktiWvlfOP1L7Gw1uXuztscDJrUPEXdaSJ1hK4cdCkJOi6WazGOdplmt5BWSZpUnD23jKkE4/4Bd3tX6TYbiDjAtaeobMK1O5vUjIxKFWSyxG8ZxBOPoFZS8+f+7BvK3es9RrsTLARhFIKwWDu/RnOpzcbBgDoWlm8yPhzjCKCQBJ5DHhdobVJUCq0r0AZpUsz0abaYNXOVQBgwDscz5qKWYM6atzgMEUmK5fikWci5M0dwvTUUU4gy1DTFps00G5JXBYYhEJWg1BllahEnMcO+h18bz1yyGBRVhIHNsD9m2B9hCwu0mLF5mBl3NNV7RRAUpjWbqEsqKAUSSRQWPHL/Q+zvj7mxPqDWnmPtyFFc9xiF3sU2NfXmKm9/4xpL44pGq8Z0lBHYPirPkIVAlxGBK4jiiMNJiBQOgVPHNG1UWpJlEQJNqQ0WunP3XMkpJoIqL/GaAVEUUUb63ibRY+nIEQ729jElFEV2b7ta3eOyGTjODOPheT5VlmAZFlmcUp+rkyYhnueifI1FBami3exQqhkaxjAMVDlz49fnGiilKLOCpmehfY94GpOGEaudRSJdkVkOZQm5MHCEgW/BIB+QiNmDZqndpnAVE6EYjA9xwg7dBZtEl0xvO6zOuSTliJMXakwGGePDHjs9uHJ1iON7mJaiVqszGg0xnYw0AseUFGnAIApZOa4Jpyb93TE
LS/MkyqNUmjLs01lbZj+uuHuwTiwm5JXB8lqTa1dv88d/eIjWI+qNJllRUuqC8+ceJkx7LD/4AAdXvsgjj57nnXfe4fatbUzTxiRAlQn5xCdJFbW2ot6ymEQBskqpEoebl4YIUcdwDebaJsPhhMDysVsu+jCi3xeMwiGp6mMGGbuHhwgUnQWPhcUjeMYJNBamWzKNc5KswsTFVEOyzKGx0mLpZMBHH/kIn//t/0ZjzmNnNOZHf/wHuXTlKnfWt5lkGa5rQwW6yrlw3yn+xt//W0RlTJpX6CKjLARxMWvIkzAhG6WYdYPf+83f5dobb7P/5g3OPfAUf+fv/SOe+dZv40d/6GkeXfHYuzvhYLfH3c2Q5195iahwOLLkkB5s0PYb3DicEK0fMtn2cb2CvckWx061mEwUc3WJocFUEtO2UIaJa1qYIqexsIRTVSh/ShqWxAdTukdWKfIJX3nja3SaTc4fPcOd8S263WMkaQ1traBMyAuYqIqVZkCUJsggoNBgmB5Xb+/h1M9z+v4mt0Wbo8daGOEm1996kyOrD3J99DauX9FL9rGdFVbmV9+riZPDMV4ypcxyFn0D25LUdMD61TcZJy4rboMlkbFPyt/76T/Pzq1D3r35Ao+cMLl7Z5EPfMsZXn7xTUxbYePT6Wqeffa/M65N+b4PfS/1oA6Ox/bGTW5sbnJidQ27jLly7Rtsb2zw+CMXCMND/uAzl1isGaSjIbXVLse6c7z18rOYNQdtCw7GGSfPneDuzVszdmycURT33NamPdPeVTNTm7RsqrLCUBJpSMI4wrZnemuVZSjxTfMNWNKgujd4zwbtWX9ZVRWGKciLlE6nw+H+wb20HoHl2GRFykJrCQzBJBzjegFZkmIbNhJJpSyqysJ0bPIiolH3CZyAMIFcpZS5QsgUqhJ174Re6oKyLMlLMaNwGCZxHOP7PlJKoihDug5FWeE6FmWuyMoMwwJNSZElCC0RUpBEKTXPRwuBaTlUCkZxjFWv4eQKQyq0lZFVEk/4FEWfUpQEdZf1zbtIs+DRp5dotSeEwzpvPZdTVinxNCLPc4aDEU5gYVswnIxJ4ox2p854ElLqHMtUTIY5tqkJ6jUQOULYKGWgyHH9AN+yiKYxtm3iezOzim3bIFMcwwLtYwgHy46ZX3QRQhMNLEyjji41k72UqoAo1dz3xAkcP0JUAsep6O1kzLXrVNUIw6zo9/tEYYptaoo8okLiiBrJMGH+RIDjp/S3D/AbNe6MLjMYbULuItSQfm+A5SYc9DZYWKhzZCWnWtX0BhlZnLDaPYNhR2wdVuiyIhmERHqKXzvC7kFKd9Gk1hA0O3O8c/kbTAcVqVJoKXBdB60FWlaosgJLobMc23EoyykKjSrBtupYEnI1RfopdlTn3a88R+AazM/VyIqEsspnfYPMcS3JsEx4K81xVo/TnsS8ffUFJk0Le9DF7LRwjA6GqtC5gsBCF0MMI+bk2jIHtk3TcQnjEsexSI0KQzaoVRNiHAwSHFsRVz2Ea6MrMA1JnoJpGbMtuzljdIvCgFygdcD7P/gJ9g82GPfeYnXhUZS8w2CUEIYhUegy6Sv2dq7Tanuk2QTLy0gTDSJj1A9pNpbYGN7g7PH7GB4M8FnAwOH4mXmiyTo10WAa2mhrQqEiuvMeyXhImRb/3w3h/78NZTEsaPguValZ7p4kSUOEMMiGJcu1DkWcEZYhzU6b8uCQaZTSrQdkurjXq83A4NIw0FrOnE2OQxIXSFwwSkzXxMSkKCqyskCYEk+bHG5sotvzzK/M0+9PMe2csw+c5L4jbU4tWmwf3qBlX6DEIJyMUXlGpafEhSCo38d/+ndfJi4KkDZJHOP5JkopNu5soEuN41qUlUKj0Bqqqpy57xD3NpYSrRTCmKXtSKlJwylrR+dpz3mM0j4nlkom04TNt27z1Vuf5+jxY8isxeWvTrFlg0svxLiuh2XVSZFomYOEeu0CeZXhmWqmKypKVGGQpILpWJEX1uykUaTMLzbYCqcIDYZwsO0m455GiPYsRkxpVFWhc5uyWKREkyQRYTRFKI3vt5GWde+EUiGj2URWRBWG0SeeZkRxBI6mSC2KqsTFpMoqcnJcx5s121pjCgOhocgKbMvFKDPajk3btJlUBfN+i3w8IAtTAq9GFE5I3ZSaaTOtLCYamu02aRoySlPcXJCv1ii6iqo2JRvaMJKMbZvr1yxaiyWWcJiGBaqcJ0kmSKOayQ+qIbZlkIwrTOEgLU2WR6R5Qa/nMJkkHG20yZJqJq2wCqQ0yDOB7ZoEdp0kMxHkqMrg1u2bzLWXeOD+B3j79evs7u4zv3iGZlfyja8ecGw0pdffRWcGtXqdYVwxGu6wOH+UJJ0wGpQcOTpPkaf0biYsLygGw4yGEzC3JujtZ9T9gH6v5OTxZeI4JB3EnDuxzJXJNnl5yMJanb3DBJVEuHYNIXy+/OwfQmWCnDIeSzJV4XkOZZbDtIFTm9C/PSbvLLD8Iw9x6pE+h4Mb/MB3/QD1BZ9L//130WWF0glpEkFlMze/zFdfe40HX7nKfQ8+wzDqEbgNKunjNBWGYdBZgDwpOX/+GH/wuS8wf36NFeHwK7/wS/zUp/48f+/v/mX+zS/9Bz7+yFM88oFFvvrS13nu5ZcBkyzOONVs0lOwN4EoA8evkDpF5TkrwSJOUmPci2E/pbAVqaVQkU1VTtGJidNssLG1iShc8lxRbxv4/phinNI8ukpFH8eyubG/wcbekJY8QDs+kgoVTwjqDZyyIMk0RaiIjQzDmDkp72y9jD15iWikeeT7nsGsLdP1z/Hw4xmvX38B3zrFYdinsdihLFPu7hy8VxMvrhzhSGuet66+zanlx9lI92kpj3ZTcfHD55nqMRcf+Pu0GgmLRy/wz1/9Ff7WX/wxtl5/lmg/4n1PHOfP/egj/Opnb9HtxOSlj+EIBtdus/ip09y9ucH1K2/RbtWxswnj6Yibty8TZlN6o5C6cZH77z/GsRPH2bxzyNrqMm99/RuIY4rT9z3O1s4N9g73mU4zal5KUGsRjabvSXhM8x7jVoh7TOASLQw8x6WqCrI8x/JnJ+oiTrGESaZnuKEyyxGWNasj93Tms0G8QEiNEJIsy4njePaAvHdeRxiYts1kOmRuaZF8VOF7dfJMU5UGpm3heDZOkaN0gTRd0qTA0iFCTyiKAimtGY6IiiKbubnDeIqubPx68H+SKFVVSZplNFuNmWZUzS5LYTghqPtM0xnyKE+Lmdkljal7PlWZIw2DKI1QaLzAwzItrCSjkBYGAabIkHZBSzVRVYEUEl2VLC13sJjy4h+U7Nye4tV9pG1QFgLXbjGZjiGqKMsZysiv1ekdRAS1JranyPIpFCXS80jzCsPSOIGJX3fZ3x9jWbOTfJbNpE+2ZSCwSNOEUhXU603yPEXpGSR+d7MgywoMw8RrVIyHE9pBCyljTpx1OXGmxubdMdkwJMxKikwiBEhpUm95WKZHEhp4NYEa5XT8GtIVHAxK5ufXOHtxnhsbV9jfqOiVBzzywDk+/PD3cuv2JkEQUJQ5nuMTRRVlL2
fllEOrLBFJQqOes30w5fiR46gy5YXDK3S7ixgqZu3EPIGRMk0TNt6cMIkzwmnKwkqXJJ0ShjG29LBdf+bmN6BWM5hMJjSbi2RJSqVSSjXFtVw8s05WpKAU/pxNoW3GZYYlJJbhg1RoaaOxKYoSq5vzhcGznFv7FublSVaGBi8evMmj8x+hEJpK5niigZE7TLnFs899ngdPPMx9j34LyTCntDK0lrjCwShSUtvGUgAVRSXRsoFRmBi6nNlnPZtcl5jMnrGGcGbUhKrEtFOuX3+Z3e0rLC8cYXd3l3oX4tin1QnY2j/E9zvcf/8HuHL1JpbMMaWF0BnRyGTEiCzOOfXgWeZsk9WFFbZ3B2zu9/CkyynvBGtOxdezKzieT6a7hOOUJK6Qf+ou8f+XpJwypTQlwnepTIXt+kzihHQwxLFdkv0eWVURSZeWHdH0KpxC4xNQkFCSI4SBYczOEuFU0+56mFZGmVWYpcKyHdI0p1AVpmlR5NVMByihLBIm+1t0u2sURKThiP3eUT71I3+bM8csvnZryiQv6PeH+G5BlQhkdgNzOuSzc68x2ZqizWSWISs90DlaRwhMylKhhURVGsOUIEo0CiFchDDQKkVIl1JVGAJQNlp7/MN/+l0szXWJQsUo6UMi2dwd0qw/wJtvX+UPv/B+RocBgW9QNyUmJrY5OzOpUjOd5Bz2ZnnOGAaoilrdxvRtzEojhcSz5CxdgRKdVTRMjRQmSlVUxUzTJOQsQ1wiMQxBPNWgBQqNY9k47VlKxmgUkecCU4IWgjSukNKabY6tHu3lP8JoNEinCbq0sISFbZv0BhMcJ8ByBY7vkOYZYKALE0sbWKagaXWYFgWGPWIhm8f1YF41iWsRUlXkRYVX2hTKxKg5BEbF+s1tGkc6FJOMZGjRmbcR0Qbj2GUYZ3gaeoNDllfb9A5i8qmFLVwG++sEdg2VlGhXU0oLW0lC26GYJOjCxmsn+J5NMi5YVEucWV7gpWvbCFPhjEzqwRymgirJ8OeW6G0cokyJb2ncdo35pUXyqWRusUk7qDM9XOcbr1fUGnWcKKbebjHMJ9RqNZaES6UVWg5JcxPT1Rhehdu0qLUCBoclXr2G5ToEtRq2P0tfkO5N8jKi2azTsm3ubK2TWAMCY4V6/Rjbd3dgBPlKji4qqrzJUw88iCOm3B3toHWFSiPAZ3lesrz4JE98/Al+9n//j/yv/+ofs2TUUWaAITRvvbbFT37/j/PS85/mtbeus3zyccp0yPU3X+N/+mf/glPnjjIaDBFSEsYlUhWIIkXpHIMuk6SHv28RjoZEu5Kf+puf4t8P/zX/7F/9K86fO0a3doJeXvDmpR2+8twV3v+R96Oqksn2Nnmc0osNeuMMg3moQpLCJksy6i3F7uAu0teoQlGFCkOlWGaONHxGOicpUh58+ARrvsl4us35J87Sqnd49evvcmuzT5q6TPOUlQWwDidY0qDuHzJgRDuYY5jfwqfOMIqwumt0V+ukl7YQDPCkJGi30LbHJMuRhwes71zj2LFjrLhHeOudq7S7dXZv7qM9wYphv1cTE6m5CbTPPUo1OWTZXeKTz5xlMO7x3T/5j0FtMtwb4QWS//f/9otEwuL8w99G3TU4Oy/42vUhd4cTFusKlI+hK9y64Na7d/jNf/fLrKx02LvzDgPHoxIQD++gCsHo4DoN2eHuO6/ghCNiYTLub+G1bAIrpj8uaS2dZDSNmQx3OBwksDBHtzvH/tYeNd/H1SahTsmrHE/7KCSGkAgUcRrNgOimSZmXSGkipUlRVZhS3jP+Gah7Dm99D/cG97SResaelMKgKHJ81yWKIpTSWJZE2pqiNNnr74PrMc6jmTHTttFWSVIoFBrXk2TTmDKZkikbISW2dCjyFCwHKSy0UTGJI8rKwK97ODUHQyryZKbXNC2FHwQUVGijRDDbptZqPoZp4uqKMAkxDAPLkrS8OklWgBSkYcR0OqHdbmMJiTYFVZFRlArpOGRZiSo1QldUElQW4pk+h+tT3n01pN1eZOGIJBxPsZ0aWVyRpsOZU1kZBHVvVjdEhe16IDRJktCpz1HlFWEYkSQJc4tthITJZIrnV2gxodatkamKeGAhYtAqAiWxDJcky3A9m2xs0GzYnLxvhfqcwWg04ZUv3ObhbznDsfsN8tRmablLWg5YO7ZA0oxAlFy/ckCU9EiLgjzxISg4ehrCocRraZAGlQjpthzWb27jeYLjRxdJ9raJhgpZzvHOzX26R00unD1HfzwgzyKm4xBHLGKUh9i6C2YXo2nip22ieMz+7gEXjq+hKolhBEx2E1pHj5IO1hkPJphWxVzgUrMKhB1gDHOKPCfzFFARJxrL9LDqPoN4SLNewzOalJlDnsRICkyjhrRdyjQj8FPiIqLCQ1QGihJTOpRVwtJ8jehIRk3a4Oxj9I+yI8douclo611WltYYCg8lUlwBtarOB578AN3GSeLRzABmKROlQAuFMiyMUoE0Zr4M4172NCUVetYbVSVCaIQooLRmG3Ij42ByFZkJwlRz/PhxdLnAVnaLalRy/tgx5rxjvDX6BnF1yNXb+0grIK9KpoOSZtvn+GlBNnUYj9eJwjHB6fPMHw8ZX97imfs/iXATrj73Ot05n/pek6s6olv38Homou2wuFD/s28oi6IAx0cVOUvNDkmWzjYnlkmUxBS+Bb0xhmfinjlFEk4Y3LqNSMHs+qAdinI6Ox2jQNtUpUIrB9POUEqCyjEMgbwHpZVSooWiLBUd30H7JgejIc1Gl7QoePvm1/l//ftf5t/8i39E1Rvxq5/+GlW9YHFtAaNwaS88zNkz8zjdL2HubxEEPoWtKcqESuUEQYMorGbAbjkTlUstMM1Zk6W1nmGEDBOjMsBQVLlGZQkXn1hjo+fyW188wHEcHnnoYUQlyPJDtGcyv/oY0/6Aet2j3fTpj0KklLjuzAjh1SzQKYbpEPguSRxhmRZBwwNd3GOMVeRZSlEqDMNGGhopLMpKg5hpoYQEXRUoPdsKaDFreoUUs39OVQEGhoRW3WfQCxlMY2ZybglotARVdImnPn4zwXN8UgVVMZMjGIYBpoUsLaoqQ5oNlNIYuqDMM44+cJwHH68zGc3jihQhLcaTkvOu5oEn2shOTLgXM4fmoBihNk3OnV/lY3/1o/zhb93g6PEGFz96lrfefoOnHjvPkZUutrXIH735PM2zixzuHJAmDkfOe2yuH+A2W5iyTW9ngOc18Osmvd4uj33wfXzkfU/ysz/7c1SGjeGUdPwmy65mwVM8eaxJfzogyT2KaopQKX5Z8e47V+naK0RmnzjMcN05Lm+/yWMPnMWUkmkV0u8f0mgt4rpw443XaQi4dOs2N24d4PuLrB1fIM43abct8jzAdd2ZZtT2sIyCsrCZDCCPCgxbcXvYI/DOYFkmP/A9345fl4R7hxTViH6U859//re4//T9rHxoias3rjK/tEjAEi+9/gYZAWeOniHu3eXY0Yvc2HuX7/quH2J7Y8of/+Yf8zd+8Ic59/4nqAqbty+/wue+8EU6gcVP/K1/wYmnzjP/a5/mU
3/zr/LiCy9x7fWzfOg7Ps5B32SUbtBqtakKSVaNkZXAtU2yrI82bPK44szJM9y98Rxvvf02P/xDP85v/tpniYdD3EZKXpkcjkIuPvMAsm2TxxZPf+fH+PqXfx8zq7PUqjGZHpInBjljanMNyiLG8zzqDRMz9JG2Ta4FpVESJyEPzT3MrUu32d8ZcvG7HybfNknVAm/f3mPjsGSSFKycDkjuFmytR6RFC99pIUONUSSkeg+bAFmDZFDgNOd548Ym5wTYskYpakwyA7NKMGnw8usv4jRsluQcCRbHzh3jyju3WFhd5PAg5OM//H3v1cSb195k591bdE8/ghUske/t8MINhbAlV/+fP0dnYY6tnUvcuLROyKxR/if/93+N127y5P2nOZyMaLXPEcl9At/BjIakpSTo+Lzw6h/zLd/9CZzleeqeTT+PGa3v0Wy2OXX/owyubPPIh59hmo5wBZx44EnWb9xh5egxoumAV778x5R2hnBbrC7UMaRkHIUcPXOS9Zsb5N6MSODhk1chwiyhdJBypq+EWbMo36Nc/AkSCGZnbXWP1Ssl70mDqqp6b/OpdYW4xwCecRPBcixKMooqp1uvM4pzELMYVqU1Ki8RwmCu0+Xq5cscbO3g2RZKl1BBUeR4XoOiqMizeLZ8sDyWluYpdYwUM1al65hMR1OkgiSOMaSD5QpMa5Y7niTZTJplSFzXQ2tBlpVkVYqWAstysB2TeXeeJInwhItlmDQbAeNJiCEUtWZAFidkaYZpm6DhsD/EdT3cmoEyS9x6neZcjd7hhG6ngSqaHB70EWr2O3NtlyiaohSYjkdVGPT2BxjCxDAMbGN2Ep1Op8zPz+H4HmkaMx2miMpEyIKyShHCIS8qdCFptTXJGGodl/d9/BymO+b2tQPm55f45I+cpbPaIBMxptEkn2bcfDtkcaVJVlr47swo5dgNiHMcV9Od9zGUYndrj7q/hBCaIk8xDEFQk8zNLbB994C9LcHciS7PXfojHj3jcO6RZxgNvsCZs6fY2r1LmpvUm02ScZtaQ7G3f8jWtkXNN0iSkmNr57l9ZwfPk9TnLEwn5s6tXaJsj1rLYzpWLDdXkFbFMB5TGgqvMcfS8TX2N2+g1YQwDjFNg0YzIK8yHM+cDfLzs6Yo2jmkECWVUeL7NcQ0nznDRTELMNFTHMtl0hdYNRevlVPlfcJ0nWlq0TteUpW3cPMBrruC0ClRlmO5Ldr1BvqbuQfaQBsSIRWVVpSUCENgaA0CtObeEKZmDaa8N5Tpewl6UuF6LtJI2bn5HAe9Pstzj9Cca/LiCzc5dnKeRrOiGJ2kc3wNsfMa4X6M4yiKIqe7YBHnAxQLCAO8lkFr/jSeeZpjx4+y09ui2azY2LzNwxefYHHxBtF0QubWcfZTKnJiI8YuHQb9wz/7hrK0Hdqmx8lzZ+hNRkx7A8aDIfNLi3QXlum0GhwebJHnJsNBRKMzh/eAT3Ewon93naDpAT4Cge0UVGWCUrMYxTSd6T9mTaSBaYh7GicTXc0yYuNRzvFTx/BVQTQs2LyzS4nml17+Jcpwyi/+7/+SDz29yGtvj/ni26/QP7zOwfWrhHtdxtt3sZ0Z2qJSJYYhMCwQVFRViWFYIDVa/gn+4puF85vmHG0VuKVDalRUtkUpV/jcly7z1qs3+Onv/hEuXHiaX/+DDY6dP0PaXeHGtWs4DYntmESZwMDE8w2ypMSwLLLMoqRkNJpSqzVZWe0SJTkZEqUEmtl53fAUlBFpNITSIMtTSmUAFXU/wDUdPK/BaDShVAqBoNQKrRWGyEBKDFFQYJBEBUHQoJZrJnGEYUnUbK/JTFBn4pgeioqKEmXImZBdzmLjEiJ8z8VMPVQ+xXEFhWryfZ/4MZaP5ty+eoVw3yRJFB//jo/Q9s9y4b4WX3nu6wx2N/jC1bf55I/+GJ2lozx4eo10csCDJ+6y2cs4fd8yFx8OWFtp49pdYulg3Am4cXWCTH2kmVP3Frj/9AJf+/oVSjWLPouSHuOJINIV4x4898oOjaMPEu3d5X3PXKRz5Ax/6fu+k+He5/CmBScf/BS/9Xsb9NZ3+fEfP8e7uwPsVLC5YXLyIRstx7z0wjbrNzf4Sz/53Tz/0iX8eolhnqPIFVVU49bNPcZOjQfPHePh++eoz5kMJg5X3ikZDFPKKieZjqAyyePZScr3fdAFqlCI0iGrUoQYgrb57d/8PS489AwPPfY03brP6WaX3//CZT71t/8yztxRqk//ZxpWg8lwwv3nTtHvHfDEI0+we3iWH/rhH+Nwb4Nf+KX/wM/883/BjSce4vTRR7k13kFNB4x3ExrtgFJpfue3P8/K/GlOffw7+PTvP8uZ+ZN88qf/Ki+/tkt/b8TZcw36WzGG4yEcBTIgmlZYliRJKrIcbly/xTTt8Tt/8Ht85MMfodmd4623LtHoLJKGCeiMb//kJ/j6m5ewVYM7NzZYv7NP7rgQDih0DV3F1JpdKisjHsdo0WBuMSALI4p8iECjM4GlNbrSnD55inp3k69+6Sq9A8G5cyaHow0Cp0Hb1exej0nCirrjk0SHiLTA8o7jBC2EEWK5ddCCRlCn3W2R372LU59jUCaUbo4R5VRmxCjZ4+jJLpduf4PoUo/Bzi5PXPwu5p6e551bX2HOa3H7lZfeq4lriws8v/Mi+ypjSfucO3GcL/ze7/CXf/p7efHWi3zptQOc2CEwYqZxyLnjT7F4us3hTshvf+Fr5LWUVquFTAp8OyDSHpZQmIaiP+nz3Bd/n7/8N/86X3vxeU6uHieqRxw9sUw5TmgcP4lsH8HYhCgfMVQ5TE0ev/jtTPvvsLf+ObJyjbUzLeZrFnevrqPMkuFGxcOPP8St9dvoUQEStHSRuQIPuOe9+eaJ+pt18Jtv30QN6f+DdlIp/d7XleLeBlOgtaCqKizfJ45jQFAUBZYtKZXgYPsQ061hSIOinJmBLGkQ1FzG/R7T8YSa65HFKVpX2K6NaUE4TSh0jmkLavUlmi0fJWLiaY4hBdKQVKLAa3kUZYxrzqQSYRghLBPPC3D82RleAnmeIYSk0WyTxVO0EMRxONPzmybNZnNGwBCalufhOA6TcYhtmNS7XQaDwSxO0rLodnwMERCFYyb9jCIvsWqAdNncu83i/Bxu3aOqNEl4D2Du+EynIePBEMOQVGVFvdkgSVOqSjHuT6mqioO8RyUkpnDRukJi4TsuRVVRqZKPfNcjTONtXvnSFhceWuXM42uM9Q4dt0Gj67J01Gd3+4CXXtrlwv3nyMpN7tyNMOI6Ny5tof0ONUfhOnXSJKfRqBHlA0ajEfsbE0QVEIlDDBXgeCaCijhO6e3vkSQheZ6zf2uM69bZ393jzTev0h/sUN1QoEw8oekPdygLBzeLMLyUncMYgaJmzzEuDjncD3Fdm2bbJk0zpskhhnTJ8ohmt0tRaXRZYFoC37MJRykSQV5UWLZH3TDJi5S8zPB9h7zM8es1pDTpdObQRcHenQNqtblZ86ZshNSzq2FhklgaV+eIpCIZGBDYTCufhu1Qb0eMyxHn1p7ENX2qPEGJHCfw
KYsSHSts6WCI2f8WeobJUve8a4ZlovLq3vLnHpZQCBAKIWbLHWFwj5F7QH+0x82blzCDIWG4z0ZymV4f8nJKnBjsrA9oejXevPkHJElI3V8iLwYMBwlBEDA/d3TGs04Sms0uaSoJ0y269WMsdB9gsXEBVZlUWUYyzJmbtzjc6NEULoZlMkwjhDKQ+k9/8/5Tf6c1SSk9n6uX3mW0d4jh2ayeOj4rFK5BPxyRIvGCGk4RkoyndOfnmVYRT73/YV57/RKWZaK1nJlxdAUopFSoykJgolWJEsywFswi0rTQSFOQFBOGoz5m0KDeDPDNBmsri3zs/RY2KZ/52hdpxEf5yueepfaIi1M1cPI95molwirgnuBVJ7M/MtKgyPJ7BVDP9JFSorW6d5qxME1z5tMRiqQoUHUPObVouD6RtDhx+hjlWLJ0fo1U2DQsQSAt8tEWrqmwHZ+y0kgLcvXvMTKQQhOOU6pSI1CIqqJ/WMO2FphOI8ZxiiHF7DQDWJaJbdoYsmA4GmJ7NqqoaLUaBLUaSguSQuPWQaOQ6FkEodKoSoEGYUjyNAazYBhWGK7EqSKyVCFEA+n8+dnDQ2l0oWZ6UqWwDBuqCqVLDF1i4mELi9LOidGYXhdTOvzFT/0lWp0md9b36HgNgo5LL0z4/JdewVMnUY0hL776q3xp/e/wHd/7F/g3//Kf8k//5+eo+222em+QVynFQYlwHKI85No7rxEVIxr2HNc3rtM9usK8f4qvP7eHA9TtOlgGYSbIihyMAr/mMonW8QaK+OAmk7DkwUc+xtYg5DdeuMVj5z/C5cu/zzPuGmki2E1uc30oCMsu6TDktWuHrD68SjZxufjYIzz94Se5trXN8QeOc/3NQ+579DTLjXlu79ylJ2Ju3prw0WcuUPoT3vziXbZ7u+z3JveyjV3KzKCspnheRaUzJtOQuudimgZCKFy7RhgpdFXgepqv/uF/5YWv/B5m1UQ6OZ7X5Vd//RLS/SqLfkUVZly82OQDHz7DZJoi8ahyl3j6dWpzAT/zzz/GH/7Rr1BrdNnd2OH1S1c4Xne4e2eLoVsx78KLX7pKkrU4jEIWGg2SU1vs71vcvnSHh59+hHDPIBkbmK5Amy6VbiHlCFRGnGSk+x3W5lrEqzWoVdx+5wvkaUaVKqJpitIJcdiiTofscMJc22T75mXSwTYyWOG+tZMMGTDeE9i6Ti/cwSodlAEb7wypeQrL8bGCE0gzpeRtiiynv3WX+tISZ87Du+9+FcQq7bkmr7x8mWbQoj8q8FsmpdYYtoHpwjQ6QCVThJEyCguWWgZuvosenqVrNHGtHg1H0chAC5NxYnP26EWwcyxTcjDew1xqcDg6pCxznrzvB/Esgyr9E7D5+u3r+DWb7eFNFtbOUm9qVhbq6LBkY2/A8rnzOJOMaHeDujpCbtg884EfYhQesr9/wG989r9yfK3GybNnWb96i3qnTZQMqMqKwJBUgx6f/2+/wQMXnyLpJ9S6DcLU4uSpE9y89jZ6PKDZqpFOQwLL4sj7HmBlcZlBK+Ti8CN4NRPbnWNp9STtxmtcev41zp7NOHH6PEfXTvIHv/3bOIbEsS1yI0FKG6Xke1vJqtL3tHT6/5SG882vm6Y5c4vrP2kiDcOgLBWWNXOBV6XGsmzgXiqPFugCqDSW5WLjkKYZrUbAZDJBIBkP+gilqXs2upgZAizLoigK/j+0/XeUJOlZ54t/3vAR6TPLV3W192N6/Ghm5C0ySEICBFqEYDHLLrAL+4OFu8uyu8DC4hEgPGIlhDBCEkgjaUbSSKOxGtMzbaZ9d3lf6TPDx/veP6K6B+7uvZd7Dr84p09XZfbJUx0V8cT3eZ6viRMdzYyoVcooVSBVIcIy0JSJbuo5JzyLSZWk4BTxCg7tZheZ6dTrVYZhRrc3oFAoIDSFWzCxvdxnbzjo4doOcZoyMTFBHKf4/QFCCgxNZ2t9g8BxKZYrmKaD45WxdIskbtPrRViWol536Hc6qFRRMDx6nT5FbBzXpOq5ZFlGt9vFtVw0BMFgiGnm9mvdbp8sSbFt58b5NE0zB+qpRJM6SqQgYhxLEEUJWSYRwgRdo1AzSTTB/W/ew/6DB+ixQd2psrGSMDJl8+JLp5CyQnXU5ZEHzzIxXkNIF5GlxNkArW/SDwyKZYPQ76AbDsXiKJ1mh1q1ARnEYYrQYtLQwjAc6jVBs7WGMGH3AcHmkkHB1RnEa/jZQdyRMu6owfbaNqZnISILq9DF8SrUJip0+k1kKBhs9fBMF02D0dEJZArIAtXREL/n4hbLRGFK3+mQhgaeV8YxEzJHMjM5xsbmNTIVoqcSw9IxDIv0+obN9UgTwVKzx22veQfj45e4dPIZNK9GKOw8ClHECBFgCguRSSzDRSiNRHWIdI/IKtMZXCaxLarVE5jmKGnUQSiDZJBSsDQ0adAnwjQMMEBIdQM8SgzSRGDq5nXtb36ofFqZ92QClWZYjiKKA1584Tk8z6KzWWW0uI8gXqM130faJmdfWGOsMoJXa6OvDwk7EGktNC2k4DlsrPZxOxqWEyOURjsc4nmKcjnnB8eRj5Y62JZNEvbobyY4kyPMjq+z0YV+v8eu8QKWY7K17v7zA8ph0UD2BwjLoLJnEn+rTdL3GZ0epVKv0Wx3UX4ISLyizXCQ0VnZIhwmvO3b38f6Rsj65hWKBUEc2VhWShzHGEpgGBpZqu9wAyUC0HVBll1XF0kQio2ra9QmFaXxImGYkgY2b3jtO7kyt8wTTz+O7PSYnnTRkjoFw2fyWBG9LNg7fYhvnLyMJuL8d6gUSAFo6PqOnUUa5qsbnRtTOZAocmBmCA1Cn1YS4bgF3nzTfVw8PU93VWNxocl2dIpXj4wTLW/y9ZcuoxdL+J0QU5dkMiPNBsRKY3Z2EikFtmPkFxACXYBUCs+rMK7KO7GUAqWDkpJMKgzdo1YvsLHZROmSd7/rHZw4cQuGbvLQl7/KF7/0CPfcew8feN+7ieOYi5ev8ZH/+XEyKYmjmMZIhbe8/rXcfuIEAsHMzDS/9iu/ycNfPolSO2stmXOPMiXRERhCI4oTdGGQKR1Xt0hiCI0Ap2gQRluMzx7joQtt7juoOHl2m89euMaBvTOU45h7XncLU7bHY3MLHJndy+Kl5xmduY1f/Y0P8+AjJynUjrJ+LaCdJJglSXM45NDuUW47/DZ0s8NDzz/Mzfcc4uzZDvFiD2UOwHDwnGmKjo0lt9nsdDBsB1dTrG8tMTm5n0Ztiq2la3z1k5/kV3/llzi7vExDL3PvrW/HdF5icelJhK+h+y22Tm8wtutmKsV1kq1J5i+fR9GnULFIZUqY9HGFZONynwvbMYsrpzAtD7cwwtylLf7qrz9Oc6PPzNgkQkuJtT5CK+IPBJVqge1rK7iejmbbdPo9dAS9no/AwrAsRkYb6IlBuVHE0yxC5YM2IOyv0vRXqY1NcvyN30pnbZO5+TleI27h0uXPoxkGtYk6G+EmZx9fot0MWV65RqleobnSZBjrjN11jF3HalgLIZ7p4cpt/KhDxRYEg0Wubad0oipdNri4cJqiq3F
w917GR23iUNJtbXHilgrdZhOlFQjFCvf84D1k4maC1EfEBSwP4synH/solZAEBmtrD3L/61JGahklbx8j1dv447+/wr9873dwam6DuD1HIUtpdzWSjktq+ugFRSq7aGSkSYHBMCFVIUmm82//7U/xmYcepRRH/IsH3ovPkLX1Zd584g40w6DV6mGIPJ5yIdxkslrHKRuYKqJREmh9QVN06Yc+M7Uy5nqPgahQFgEijglUicjrs9brsLG+xYnb78ErrPH3n/ozbrnZoLm5TOTqbCY6wsxu1ERv134OmkXsuUtsNzsMjheYObaLi0tbbG1sU67vZf/0OC9trDLXXub43fdyeX6Tha2zTI1OMWwvkAb7eOu3fwe/8H/8LF5vFadQQ+hgkJEYGnMXLqBFGvtfcTeba5u8/qZbsEdG8RaLXF6Yo1irMVIpoZkGz1w+S/XaGuuDK2Rxht6PGIYDNqOUB+66iZeeeJErKyHo29z72geo//A4n/jwn0JPoVUquRweuWM+rm6Amn+4rbnufPEPtzj5v8t9EIEbbhCaloPQOI5vGJqjWXk6mkqxLYtMDZjdvYt6o8DF85dwTQ9/KEnCCA0BQsMwDPwgwnB1amOjWO6QOMkYDro4rrnjZOHgOnmwg9I1HNPGsC0yFWNaDv4gptVso9kGmi530rMCJAFSyh0T9ghM8KOQNAPbsqiNNOh3umi6gVCC3mBApkAqQX/g06iN4LguXhwzDAM6fQ1UShymVCfLVMcmaLa6uGWLYS/DLZjsm97N1mYz9/c0DNIkoe2HOF4BoUyCIMC2TGzHymkAMh8SoHKOq6GbpJEkSyw0HeIsYM+hWdxixvy1EtUJOL94jakJm+WrPr6MsV2dxug07WbI9GyDm2/ay8mvzeEPBkQKJqf2EAz6JHHKsB9Tr1boDHq47jiubdH3W1i6jVnwsEydfi8kChIMswixQ6oyynWD8mhI4HfRsj2IVCfo99AdjULDodPuUW94xKlLa1NRGTVBGai0SOiHuJU8Z35tfZEw9shESLFUIU6HlMw6SoE9MSD1YbgdMhw6eGYdmUgGgx5pKkliSa3WAKmRJiHVao00BaE5HDy0D9sd47Y37EOoiMVzF0FP0RwDPUzzrWAaoZllYj1luB7zihO7SPqKkXoDq7qH3sYQzU9RZp+CLrl8ZZ5KYwRneoxhApYwEfJleoiUGbrQMbQ8TUqSezoL9Bv31D+MQUVZkPk0Krt502u/m2rNo91uI2MHTV/i/KmrrK5fJpQXidI+Z86uoLKQolkhEhq1ym5KZQsZw/LSHAWjQNGeJo4lSbdJN/DRwqtoWx6WbrD/8BE802R8pMLVS038OCapl6m5DpqKabVaaKrxzw8ovTDFbVTxe33iro8fBTQNyEzo9HsMWz6j0xXiRDHoxSRhim3q+MmQkyfPMbVHculybtmg6wpd94iiIVmq55YVmdpJonm5aOm6jhIZaaLQ0hRbZfibm0g0HM9laXGFB7/4VQ7cVmFh+Rq7Rxwae++hGw0RnU0We1dorUvmF07juV7ecbMTEyY0ZKYTBD6OY2IYGpoOuq7lHAYpUWQ73YSGhUHc9bj7tts4cNMJou4S4dYL+K0LnH0RWmvP8GwnYqJkoR9oMGgdRSiTaqNBFEZ023mX2e0OaIw2iJMMXdsxUDdyY/AwSdlc3STxfQxDw2nU85WR0InjDsNuG892GRsb4UtfeZivPPx5tra2+J0P/z5//Tcf49ypZ/nRJx5jfGKED3zggxw8uJur166i6zDoDfnbzz7MQ1/5Mmnf51d//de4trTy8mRSCHRNzwnjSf5g0YRA2/E7U1JHGiFCeTiUMJI2RgppN2T1wlWeN47xhvtmeeKFP+Pkl57CrRQQ42/iXfd4TM1YXJud4pX33MqZlTamdS/j9S5lTzJbHGO1t0K3F2B2+wzX5/jrM88TS0XfFAy2OqhmxFb3GoaZUdzlcHH5JU4cuZ2R8QlWNtawHZtUN6kWRrl06UW2Ftf5hZ//FS4uXuTPP/dlxncdZ2rc4fADR3C916E+18ffPM9t9/0gxfEO0pihfmQJQxSpunXMbAF/awMtMHBLFe565Vt58eI1RLbN/bfcx9pSwoXFBRoTU/zAj/0inc1LfPxP/phdM3uwrGkkGsJqsbS6wZve+A5On3k+z2HXKvT7Hb7pLa8nyUJOvXiOkutQKRXZ7G6w3tumUhXIUEN5JlPTBWRT8Rcf/W1kUsSrOuyfG+OuV/0ML3z+o/zWn3wGfcwg6Qzph232TY7R7XYRQnHLbTdzbX6NM6cXKXsFtJpGbddx5q5dyzOTBxnN5auYU6OEYZuzZzrcdvt+krRNZ9BgdX6JMGyjO4cQskY0WCZ1BcvXlshkiG5LVNDBrTkEaR89MHGKKZadoZIUyyyxtd7H3l3gU393Fj3UWNt8mMcfH3DkSI1ydYDjmgy0jIsXBrznPaP0+30Mo8nywjWGvYB6rcJmt41Vley6pcI9u3WCXpd2f4rXN+5l0GuxvjVgZnoPkdykvTYkK2ZkbYdHTrUxCgWs9pCsEzM2NcXorYd5en6BfnuIKSZYCTW0yf2MpBm2D6ZusbSyzB0P3M904wj7jt+JUVaURMz4rfdhzl9ma+3cjZp4+toar3rjN1GuaiysrRJ0tnn27EUO7prArZjsdj3qsUWv06Zgl7ELZdY6V7Eyn8HmPGW3wujofkYqB/iPP/VT/NVnPsHi1SUKJQNcHUs6pCrh4uXT+HrMm9/17TiWRXe7RbFQQ9MzPKfCxvoSIohojI3w5a/8BZo5ileQiESnFw3oJglZP2Xv8SPUVMyr3/h6/ui3/5zjJ+7ke3/yP/D3H/kDVlbWqI9UCPzcfuf6yvs6eLzOl7yu6r7+Wg4q/yHQzNB1cWMVruuCbr+HbVq5u4TIkDJGioxCwWRysoFh6iyvLlKtNWitNREq306BTpZmBFFMoepRqpeQJKytDTl0ZAan0CYY6uhazkM09AJCt2l3ujRGi/jDBD8YYGBimQXicEixoGMaJXq9nGtnmSa+7xNlElMvolsajVKZ4XDI5vYGTs+GLE9Jq9dHiOIeaArPckgSyWDYwjRtRibqOMMh3X6AEBnCtAiSlJCQVGqsraziGi5xXyMRGdnO0OD65NYwDDQEpmWRJgl+f4Dp2FRqVYbDIUIIkjRGNwyyTJGmCZYjyFRMqVRh7tIahm7i1br0fUkUJpx6BtqbPWYP11m9Jkh8g0YVVuZ7zIdtwjiA1CbxdZYXegg9JA77eG6FWs2jUTHptTu4nsBxTTobEWmiMDQbTYNiwSEK09wNpATDgU+xHpKkGdlQR2Gg6xANQtrLHnGko2VgujGpGHDxXJ+JmQJBtI0QZQymSNM1arUK9YbDVjPJhaVOnWJhF4m7gdDKlGo6RqwjdZc4kHR7IaZw0GRuLyhjRX/oUygV0S2XYrGCMD3sWpmi0tn0M0688p2sn/sdDLVFLEN0oaOUS2gFOJaOoIfecbGjW9m/bwa7cABXjjB24DiGWcLXI0zDZebgPjRSojAhA5SySNIY9FwgqzJJKj
KMnY1sloIUOZVMKQlcd1nIQaZUASJJMHUHlGB5eRtd1zC1hFZ7neK0yYiapVqeJhGKltYj057BNYu02wWa7QV0ZhkbGWfP9E0UnUlsw0E3Jf3hOs1mn3JphF4/AT2fgIvAp1JIcXSbtbkqWSnAjwckokDB20UYrv/zA8riSIlhu0MSRZiGjR2n+EubaMOY6miDbODTXvJJpUGv16ZRHSEM+ozUa3QHPfYfD+j7DtfO6XglnyzWEULHMBxMU+H7Pkmad7YKbWcE/DJPR9MdEIok7JP1dBJZwMfiC196nFdk+3EqPda3IU10yo0CMouoNSxeeHKL7Y0WVkFHkYCQRFG000Hv5FnLBNOwyPmTOZFcCIFlGRi6hswEW1t9bjpyE//1d/6MK02dNNzmtre0CZIVrp5bYWvxGqfOnGF1sE58NSaLA1J5grXVDTR0pJJEScy/++CPcOjgQbI043d//3e46aYT3HfvK6hUynzhiw/zVPY073zHB6jXalQqNf7yr/6aN7zxdfzhH/8x48VpsiRiMBhSqpbpBynj03vp9Hoc3HcAEHmHGYSgMiI/IPAjdKFjOTAY9tGVw+Ejh1hcXGBhfgmFlScTod8wmNeEQZL6O91TDsBNqdAyDdPQkEojwCVOTF57/C726Ve4embIyqUa/+Z9b+WzXzvJerfDl3/zE7RedxAjHtLbjgmiKt948jN86uNfY3pyBq1QZ9/eg9RrGUYpplyvMhguc+HyHMuXO8zOmpQmC+h7Rgjn1/Fsj85mC10plldXUYFPoSQIAh/NLOMPfUbHHLa0lD/83F9RGB2h1lmi8I3nOPyffpqHTi2xtdAl8BXtVHChPcm//+lfxarE2JisrG9w6717ydpt9jYcjt5UYHxshl2H3kh93zYnz7aYLFuE0deYNCSf/ezjjNYPUmm4JKmF7RYoF6e4dHGeB175Bl79mlfyxje+lne9+234aUaqp8Shhlu9mX1Te+iFX8V2Igq2yXf/yE/xGx/5K7bmF/C3X6RQLFPQqzRsgVIeUvN54N5Xc+30l/nYn5ylaCUkdoTf7EJsUC4brG76WKUisely5sIl3n5nhffeez/IImk25KMvXOFffdNxxkoWKx2JjAJUvUjQKiNciR9uoeKMuQsLjDUmmZqssLSywdrKkPtvN1hqrWJpXUzbIgtNjMyk12xj2BJDFBi0A1A2/UFKwfVRRky7HbK6HHD7iaNszK+zf8pk6eI2Y7dIupt5QpaVBWwuQiYSRhpF7ILHIIgRsUkcRCwvPMn5i5cYdW8jC1t4BcFmLyHLmjg1RTM6i600SmWboNjBFaNMWoKSLehg8IXnnuNdjV2cP6dTuPnV3FTYxaVvPErkQrOzjR1VoWwTZl38dJWP//kfUiztYnK8Rhx2yPwRTj13kkFrHSrFGzXRcid49qvPMranSPXQJDVzArd6lqW1eYRt0pTzVNzXYo3XOFwd4/zcaWbHd2Ogc+9rX81XP/84i1eXuPnwK3nzG19Ps7XFn1z+E9I4QRg6iZ+QCEV9rEL76hyf/cTf8BO/9rtsn32Wb3ztK3zLm9/AXH+Vjr/BHaNjZAWN977xzXz8r/+at73nBxiZ8Hjxpcs88cJJpsdLCARLCy0ee/pFhJuysX6Ng0eP8S9/9Gf5k9/4eVY313Fd+0bN1XUdXTPIZE6s1G4ovMUNcJk/DHMBjxAKTRfouomUAAJdf1mso5QCleSm01nMrl37EUbCoJ9x07Gbef7pU0R+giLB0AxkKgGB7VhYBYtES5ACGpMVJIJKeZReZ5XmMF+xx3SRwqFYLKIpSJKMfqeH5xTIogDLNgiGEe1gSJoqqvXKywkzKcRxSrFQJEkSipUipbKDzDLa202C0KdgGniOhUQjSVOSJEVpud+lrusUi0UMU6PXTFE6ZKnEskyiqIeWKRxHo+yWuHj5MrZbQAlwPAu/M8Q0LIJgiMCl6HogFSMjI/SHgxshFZZlEYYhwhDomk0mE2y7lP/cRY35K3NMzRxi99E2ie7QXIywCyU2ljJe+8abuPTSFa69tIHjeQwGgqJjEYcphXLI6kKGW05wHYsojum0+xTLFWzD2NE7SHQbdANsSyOLdYIoRZFiaCaOXiJlwNaKyOuBJ8gcg+LYBPWphN5aDeI6AW0cewTH1QkCjUFvhVLJwRY6yPU8+SzKaPfaFIoOlZrOxkbMxYsLNCaHZL2IYGBiZxPI4ibTu+ocP3aEU6efZ2REJ1U6UZigiYQ0iQj9AE13sfCIw4xBp02ilWjJkM6wm08n07xxEZrE0kD6A0wTpicPMHv0dUjbpCd0LOMIZQTDJEHqCZpeQJHg2IAykSLByAyEUUAzDYSh57GmmkIoCTLDlBq6MIC8+cr5kyKf1CoBVowuQaYpGQmeXiWKQ8qOxtL2Mo89/Rw37Xsls4c9FlbOoA0KlKp7CfopIzUPKWax9Dqb23OUC2WCRKc1aDI6NkuhtBevHGLZFSZrNqamk6QGWhzjGQZaYYhyAxplh8FWC8M1idMu/P+DQxmFPnbJRWqKgmahHBspoNtu0+13scnoGRa2UcFMIgb+OoGQNBoulkiJ2eQ170i4dFZhGjZCCbJMEIWSQlGjUHQIggDd0NGEyL0gd2IPNU0jViEZJoISIlUYmoZuWbgWnDu5xu23NFjsbrAQv8TusVG8UZdLFzKee/JZig2b0I/Rjbwg5lYXgijK8mxqkRdLoeXv5YVQx3EMdE0j8GOMzKavUr7zfd9P4EvKFRtPFImDmPJUFafhsvvIbfSbK8heh7WFAYMk96FKBBgCXvXqV6Ck4pd/9ZeJw7ywPvHEE3z+wc9RLFf5xZ//b3z9icdQUtDqdPmDP/oIYRhz6epVHMtAAakwCaMOspXwPR/8Hh544AH+/nMPsrU1wPEMMiXYvWuGkZEGaRywe3yEDEEsFfWyjgReef8rOXvuHHv3THH1ShslBcKANMtQMlc4ZjLcOVcWpmGTECL0OsNsHcOsY0qHJNnmbd/2zRy9/wHK19Y4dWmR3bP7+f995zRDHLyf0BEIfuXX/4i9sw327qvys//u3/FLP/J9PPbYV/m5X/49Hv6zT/I/fuMneeyZRYxZhR2XuWn6BBXvPI4bcWFxmeZmyog7jZal0JYc2L2fcqPK/LV54khj355dLCwvoIU2K8sxr7n71dx24BB6fYa7X3GUYDui0+zQXllkqjbGxWiTwO+ye1eZQ5Mxr76jSrWyB9e7hW4GUaeLJgWmcFlfHfKD3/eT7Nt7kLVwkdmRXdhOyMLcVcpextbGQzSXqxSKJr3egOOHD/HRj36Uqekyn/zrL3P2zFWieEhBl8SpjgwjLl56hLl5i6WlNcquy4G9u4iSRX7xP7yNj/7mZ3ho8zTTYw08YsyGwxte9W7ufvPrWE1sHvwvv0pr4SQLsoVrONQ9j1DLGCxblCZ0gsgnGfQw6qM8eX6L5+IubrGB3aggfI0XFrqkicQplzD1iEo7o1JWpEFEwa6CSjENRTwccPvNB/jaM4+x2bKoTb6S5V6HolvGD4coMwM9wrFNROqgLEG9OkkQa
Oi2D4SEQwfXGcVwNghFm9HCOFUVspr18JxxDAZUy4rxA6PobgddFDGUiYwG6NmQeNClWp3ALRaQrZCN7SVmxm3Wr61TH6+RphElexSpNdFbe2n5mxiuQAsTap7CshRzKwNu2ruXXdP7aG6cZ+nMAoeO7+eO4/vIZJlnzp4jIWQQZ6ytXOJHvvdf8fVHv8HCxiLnz1yhONqg4Lr4S1c5snc/X3r80Rs18f4Tx2kDSVajdWmR3mxu7VWvWPibOhdbDZ5pnWfU0rl67gnuOnI769uKfXuPsrgVMrJ7jMl9JTb66yyuFYhkm8zKsMwSURCQiIyiZ5OEAcq12VqY48M/8wscffURigcmOBM3aS712TexD1GWfPGLf89P/+vv4ehNP04QjXPmxTNkbNGPAropXJh7CTOJWHxhi2EWoZTiwU9+gqM338lvf+Qv+L5vfxNBqm5sh65PHaXUECKfpjiOg2maRFG04wmZnwtdv74Wlzesea4DU9d1GPYHgIaWJRjKxB8q9LjEnXee4IVnz/L0o98g7Of2bCqVZFmKbdgkMiOWMYYtSLUEhUG1UaLd7DA5MoWQNrrYUZEjccsFwnBIEA6wTYtdM5MMBwFSV2QyZtCDOJZ4bpEklGCBbRlESGxHo9XqUK4W6PY6uJ5JtVBgOLQYdAekaUIwDDBtFwnomoVje3g2BEHA9laHSqGIChX790/R7Q3J9IzJiSqrVzaZ2TvNansbzbByuyDA9lwOjY+xtbGFkpJjh4+QxgmDTpd+d0jgD9A1QSrz9DjLNoGEOIswNUGamshM5L7GmWBh7ipH9t/BC4+/wPTRKl1fkQawdq3L3LktDh3dw9ln56lXa5hmRKyg2405etMIrU4LmRXQbQnGEM0w8cMBSqS4noM/7FMoeqSBhWlCpnyUtLAtj05ngGWXqLowDLcw0BHBkOVLQ7pdie83KRfHmd47wfnTfQqlDNtNMW2NLPOwbYfET/MJnXTwPAOl9Wg1TXRnSHeli+45HDgCnc0hay8tc9P+W0mibV48+RRxKmi2DLIsQEkNIU1UnNLeXKXbWkMzbFrFKlcMh3pjF0m3izIkQpeIOEMXglTEJD54BYthmmEKn6WNOS6cXaFUE8hQYAoPVy8w7C2wubhIxW3kDZVjISxJkuiYrkeqaUQZGI6N67pYxo5Xq2ViaCa6buxwjq/fazooA2kYmFqKpkw0QyfTJGkWoJKQyalxjhy0eOncabyRUbabCwTtSZCCyZkScd9EalVsq0ize4lCYZRKucGgu0qst/HK45hU8aMIx7ZxbEGlXkTEdfw0Tw9UIp+kOmYZgyJZKWCjrf3zA0pNmYg4RiejWCvkCjXNwHBsLEDpBYSRkpIglYGFwFUpWRDlvmMDi+JozMHjJnPnoFSP0cIKKuuSSgdkkDNTlYlSOrZl593YDk/H1hziOEZoAUmg4wQmzc0VDu49iqWKbC0bHDpwDyIz2draxu8lLC4ug9KJVJ8s03LrHQG6KfLP1hIMU5EkkIiEgukgkjzfGlfS7qW5/9Owzy133Y0+sR+5LJidjBnZXWPQiVg4v8Jb3nGc587MoRKf+tRu4pn9BFafdsskd4MUpKlkz+xunj/5PNvbbQxNI04i7rj9Dt7+9reTpCmjo2NEYUKSJFw6d5ler5ebCGeSyDTQyM3hB/0hlmnwB3/wh3zkz/6UX/nlX+VLD3+BVrtDvd7gez743fzmh36XNNNAy01awzhlq98FpTh65DAf/sM/JYkAIVDIvNhrBnEaYVsKscNnNXRJKjMkEmSMkjVcCRExtlvikZPP8vX5GFSJyrjJ5b7kYMkDIfibr10mDC1WNwcs+QN+7g++xPnn5qlrAd/xwbfxb3/6vzFa0Jg+PEmneJR+d5tBd4uy/jZGzD2srD+NpTymd62ycnmJhqgw5eyiE2wydXWIk5lolkMwiDCGVUKng6YXqFZGGA0FlS0b/wWTxvFJdk02uKVW4O777+DLn/s0XrlEpOl8+4/+F9Zf/BytQYoeQCYBUQIBfidBiBjpn+SF0y9SnRjj7PYV4jjGtnRKs1MUtF0k6z2urq+iHMmFufP8p//yK/zMf/y3PPzoGV7z1vswlElsWAT+Godv3sfoyAzdzUWkCKkV64zXoDP/JBcfn+OHf/TdfPWpL1MrlXjPD72TP/ydz3No90HmVyWf+bMPce4bn2Z6vIqTNBj2e7QGCYaT4BUtwiBmEITY2giYRVbWh6SdiCi9ijfiUK+N8cL5NSJDR6Sb7JneTZb4bIcDSj6klQ6+MqgECtO+wP6ju2jUZrm6voIWWYxVSrQjgWe5aCJgmHgYSR1d89FERmEI/bCNI8r0ggDLLmD6iiCUjFZniZOAza0QM02peXvp1s4hkhLNwSZeqrPV2WJiJAcySrPJlEs0WGO6eiflwrPsrhyjubJEfXyU2AnQ+hbZik3Pk/SNq+iaxrg2QqlRZHFOMuuZrK62sGsjLK15HJ64mc9+9XOo+YvsP74H5aR0+21cu0Amlrn55m+B4gH62uM8e+oiCMGRukdt4hDnNq/w7PkLvP2Vr7tRE1c21ri4cIWRepmV7Tm2O4tMFEboBQOcakZjdA9mu8naSo+J6UOs9jfQ9JB3HfkuOpubGGmRolsH3eHKS9cIh4Lbbn0Fc2fO0g/AsxQqyGhaGuOxhjPqMnfxy6zPneLI6+/FEwU66RoXrz7P7OweDs1onF/bpL9tcu+9Eyxc/F2cEZcktVm6tsjYeAPT71E1JcurLcIg4QPf/l4cc4ozZ87ztg98F3/6m39EzSuSqgCpZyhytbYSBoapMI1cUJlbBeWCRsPQb5wTXc9Nt68LdqQ0MXa8OzVNoWkOmm4RxSmzuw/h9zKuXL5INMjXvkkSgGkiI2h2ImSW0tjj4iZ1enqXerFIlLikokWru41VcjAkpKGFNxbhaQLPKrG0soGUA2qVIkLGREGGbTcoFhOipIffa6FrZSBCF2U0WcLUY+rTVZIo97BcW1yAsdzwfIBGHGSAhow0omiI5bj0sw6lahEVGySJZBglhEoQxYrD0wf5xnPPYLmCe287TuD3WNtcxyuP5M4bQ4mjKhw/fBMPLz2EWSly+oWXaDQsCHUyS0OaJlnfZ3rfKJvbPVIEIk0pa2WiuEcqMuySxFAjDIItJvbXscaH7L7FRaaCLO6jC5NBOE+S2Ww1+4iCRi/zGTHKlG1FJ05otxRZWqafbDEzvYfRGZ3mZkJpskC/2yOJQUkDXdr4vk+1ahL4HlLLSFLw3BKaplGbUqRbCstLkH5M2O8h0nJuEegGiLjK9J4hvY5Fpegi9Zh+EGKXIvS0TMKQJIvIVAMpQuqTRTJpomcuvXaPtSsphl6iMuEy7EWkieSBt9zNN557npI3Qqbr6FISZgkylhimQxxk6BUdP2sipMB2qohBjAa5oElkJGGMsPJnYJoNMVIoVaY5MDbBgy9+lJGJMWSasdXucsft93BwyoVuSrHUQYUQyhAV51ioYtc4+eJpgiACLc+1l1KSa0EydM0gzlJUVge9h0wVyARL1HM7J+mQiTg3XBAJ
SA+sPu/5njtpRX3qtR4bixFuwWF6r0FGREFPmJ/3CYwVinaZKHIZ9AK6my26gUMit9ncnsP0G8zun+XFZ54jTm3i+DxHan32VXyaTRetHxOvWOhTEIoe3kaBuln75weUIvcY59AtN7O8vYU7PUq63SVsNSlZDmahxCDpYZbKCKFhCMGgvUnRskiSBMsdkMUOB28VXDsXYTkaSRCQRTZxJCmXHLI0QcoUdlSDSqkbpFWZpBiaBULgByFj2jhHbr0F3x/wivvuYHv1GqeffhE/7SEsg1p1D8urTUxissBAqpgkETd4mtdXNtc/v6A7yDRD6SamSFFNgeVmBLpJ0Wtw4I5bGKQeL50/R2gWWL28TdQfstVucXV9wPmVFBm6TM0oahUbwxAUSyUqpQJpIlleyZi/NscrX30fL774PEmsiCLJu9/9bv7zz/4s66sb/M0n/4qVxWtEkU8cDsniIbbj4g/79Fohum5Qq1coFjxcz6E/6CNSQRRHyExScD1+5j/+R37jN3+Ts2cvUGvUEJqNVBJT10Gl7N29i8XFBcJhF9/PQBbQRZ6ZZhhmnuebpjdUlcIQeI5LkIa5hlxBlAgwDITIDbB7rSXKXojWGed1d/war3vHXbz9X3wf5y5vUA4HoLfZXrrC4vxZ3nTL3bz1ne9jUFTsmjzAuXPrvO+Nv0a0fo4f+/fv4trzF3jsyRcwyhmj09NkQqASgR2n3PGKeylt9Lj4Qguz1MCQbYRXRNeH7D2maA5n2dja5My5JzirCiyf/xi2OYoiRK9OsWt0mr/6299hpqrx3LlL/Jtv+V4cz+FdrzmM7dq5YlVIlNpRpGo2Sgg0w6FU9qhPTOI4Htvb23iODSYYjsXmehfRb/ELv/x7nLjvHs7On2V0wsA1+jz42U/TDof0ej32zt5MFmdc7kbc9Mb30v70x9mzz+JS3+PZMybd5gFuv3COsYbG+bOXeeDed+PYNzPAQKfPtWvnsaRFEuq0l5poZQ8jM0j6CYHeJyKhXHLJ/BWy4RH6cZ8xz2KfIVlJFUU0dCcj1DRIDDwrwS9Z6AsRWT0EvcbU7jFYa1PzRhn4iuGgzpjXz3PIY5epEpi1Cn7HJ4xKVIqKdhzh2hWkkJhLJSrjJfY2bsGKSiRJl/tOuNx67BX0F3ucjB6lsudOXjq1yZLcwKl4vP2u76QT+dTqm+iGxHY8pOhQLNkkw4h22GdyT5lHr3yDiBaNIfjKQCs1kMEFisEYtek6/c01IqNLt1zm1mN3UCnVmIl6TM3sRtdCMktyy7FR3vKK2/mvf/Tr3HXo1YztGuOlzSX2Ht7H6rUmjz33aywvLfIT3/0jvHD5s6xHbR596JPctn8/1xbmQey/URMLxSaDtMf0mM0uirR16GUK268y+8ABzn3jK7zixKtx7/l2/vjPP8Mt+6p4yqHfH7A6uEovXKafzHD2S5/kO977Ji4uJMwcO8Z73vIufvPDv5ivVHULb6uJMyLopIJCvQZhwJnPP0R3/xHKByawSZDL24zZBygmkqeuPI3rVNFLo9h6hhZts7p8gcFmjdmyxf33TNEOFWfml/nIhz+LNd7g5j2j3P+m+zhy8Bjn5i5SdHVEECMcnUzoGCg0le1QYF6um7oudtbd+YMzjhM0LRfS5A9RUOQxj1mWghaSZDHlmsGDn/9zgiDCdW1MQycOIRUORpqSiSL3vuoQTrbBxKFxLrx0jeblPr2iRVoYsmt0P73mBoZlECYGvphHDCcpjFZpbW2ghCCTgrX1DoYJtuGSpAGxH3LPPa/g0qWLdLrbJAloIiZKfPr9Ib12Ec8pYBgaoyPTVMounX4XoVtohkUUh0Rpj9179lIqjXLqzHk8y6G7vkrZMVCJhERipIrp2gg3HzhMoWwx3phkfXWR2YlRYt0jjNpERkIkB3zpa3+PkpD2JSWvRquzSa3QwC06BGEPxyswOXWETvcMdhTSD00SLyMVAh0FkUGqt/DKGVnfYuFsRL1W5fJCl835Ibcc24VbMSGao7ulmJkYodXepttsYegOkZ+SRm103ST2dWw1za5JjfNnHmdiYgQhA4b9AGKXxIpR2GxvRRiah+lKdEsRxz6apnHxbEqxMIGmF0nSEN1UKEOSqQhhF+jJVTJdx7A8oqzP9voARxtDN+I8lzt2ECJj2BuiNI/meoxmRCSBwhAOiW8xjGMcx2RrrU21WuPc2Zeoj5i4nsagGyBJqNbK9Ps+iZ5QLBpkKibuRVhunfbGkMFGE10zc9W8nmE6GqlI0YXKBytS4roFen4Ty5HURmpYlkWh6lAZbRBnEaQOIisQJinoFobIreyCIMC2bRyvQBzH+TR7R5RmGh5S9tAokIkAJQqoTAdspDnA08tkWR4kEPsJhjZCSkR9bIZOv4Xsuuw7PsWlU4sMmoJo2KJY9HjhpQ1o61j1AWa9RNHQCPpt5q+eo2A0mHu+Ra28iXJKFKyQ5dNnaTWHJJlg4sg0pdsOQLxCKmyIdLx+Ec8z0D2DZi/85weU290eugZLSytolk04GDIcDtEsk1hIkjTGLhRzMJhldPp90jBCq5ukIsNxbYJIEoQpuikI4hTLMciimCSz0TQTXZdEcZZz+a7bTAgQpiBRikwBmsB2HRYWlnj/B76f+17zGu687y4aukmQbBMm63zjuWf5qZ/8BbLERxcJriMhNm/YWlw3Tdc0dUORGKcxVrFEoWjR3Wyjo6H5EplkvPf7fpLIrmAQ823fvJfMTCACQ0X0hwPCQcSde2q0222mRicpeGUqRxRXn43p9poUCw5jY6PMr8zzBuf1/NIv/QoyTfnt3/kQJ08+zy/8/M8zPz9HGAbs3j2DY++MyS0HAfwfP/VT/PaHfgtN19F0jVKpyAe/+7sZHx/HskweeeQR2t0u7/+O72BsbJQf+lc/hK5r/NmffYyzF87xoz/8b/jQhz6Mbho88MD9fPnLXyEJh9imSyQhQ6DtqM/Y4RQplQuYroPLfhCTWRqGpSO0mDiNmRiZ5v67b+fKUsjU3nEe/ezD3DMZILdP8qGf/6/cd+8baakVrNBg3Jvh/e9/KxO3HMJPPQRFnjnV5bnPPsEhc5XRb9rDV756moWFBd7wjmOMjo4zv7iNURxjdX2IPrWPS2nGQcukpEAGPWQ5wy5W6QxTur5C13wOGBXGinUMu0qzkdAY9+hlgnR9nandFR598TH23/RmBqLA2L4JzKIAzUEGPkpIBPk1Yds2QugEUYZleUSpIg0SBpFP4MekQYJu6dhFRTwIcTyX3//rP6X5ic8wsucAf/k3zxFuDDh4WwFbd/nOV3jM7vdYXFvn7MICx43bicdmeOSR01iiydJWiJI9lu+8B7dS4urSMr/w33+W8v5XYkj46hceJNxYYrRYpt+XHLz9GNNTJ9go7+XIoUkYvZ24uUYh7EJP8dTCnzPZGOMDD7yBm2ZrfNtP/ScO3iI4OnILlszAFHzjwgbbiwF3HptiOOwTJQaDYoE0aWNmOs0rXTZSxczkbsrlOivbbWaqUzxxts3xKYdaOSCL+4zWxlm85DI5PaQ8C0a8n2e+Msf
I7oPc/6rXc235szgzNf788w8TG1ssb13mcqRz9fwSSW+e+F0WM5UR/E7KzbdUGYQ9hnGIpjRKDYcvPfE4G8kam62QzU6LyZEqqa+R9pcx4gicHvFJweyoxa37TnBtrsXmxho/9P3vYCYuUqqUuGnPPiytzFMX1nnx0ja7pg4jzYznW22G0RrBixqBsYJmJNTKVYzRUZzFSbSVLkeP7SeVITfdOcN6/+yNmlguFNFSi7QP+yYO8tTZDYLMp1qtEMUFDMfj+efOceSOo4yrPnXLQLc9FpaeYOnsVZqbW4wwpK9fIwwvEA03oeOx65V1xqoNooJNaxBwc6PIuYUurpsSD3poZgmj6LG8eJnw8nnue+AEYtLgxK2v4sDMTTzylS/yhS/+Bo1dMzTjMpWCzVtfdQfv/84f5cyLX6BsDPjMuc/w7d/6/bxq9zTrF0/xyWeewdBfjdUQcCHDdkfw6aGnEkfPiJIYTc9jW6+vs6+Lc/I/GkLooLJcwaq0naYsj93VdY0oyrB0lyw20KwERIjnuejKIo1zMZA0JIYFRubwirtewytet8anPv0873zzCTbv2OLrT86x0tJIy5J+TwM7oTpapliZIuhZdLabyB0PQs+uYgmL5aVF4jjGtAQGFs8+9SylYgPiIq6r0dvwicMM23bRDMkgaJNmMWNTDYZ+huMauJFFOBywb3wPw16fu4/cxp49s6huh9ldezkndZbmNyg0BJZu4Jk2frfP9Og4ftSl22zhWDad9gCrrmMXiuhaimbrFK0qveUmWt/kyH0TrGxaOMpiZqxKE9jeHKKnKdEwI+zYuHWfgR8iRBHLVURJAEluRTbcXEXs30+toRgNBQcPHWPu9DqZW+W2V8+wvZzhODZeUCDwUzKZ4JUM/CjGclwca4RuPOTzn1+jteXilTzcQgW0Dr3tmMFwR0ykSyQpum6hawZJHGMaGYZRotXqMFH0UFKQJSaGJ1AamNYIBafMhTPnGR3JBSrlqsDGx2+bCCwQAzQ9j/VN4wglFaChGymlqk7YU/lEMQ7o9jbYt++VkJhE3TqmMrFKMGjF9JYTHENDK2YEfg/wkEmRULToXNmgaBYpeBbDVKJrDnEc5rzlLMYwiqRJQLHoYZkOhUIdhEGaGhiWTmWkSm/+EhqSLI6QIiJMYsqWgRImg8AnTVN0oZFKSZqmN2gkfhhhahLdSFFKoqSOoQr0oi6p0tCljtRTbDtP85PJAEyT9dU2g15CNFC8+MIcVU2nEmsMtyqM330P9zxQpJjFdIKr+FFMuXgYIQocvLmF4wlsbQbTdLE1A2lKDt55HMP2qNZdTn3uyyxsBQhDEGchnlPHjA2icIjV0BmGzX9+QFkaG8FMJb2tHgYCvz3A1ASGZ4PKQZkpNGzDYTDoU7QchGlSLVQwkGQiIUokU7sKHDwx5OJpqJc1olhHaYo0UwhdQ9My4LpZrobKcl9IPU0RWobScqsB29b59d/+Hxy8627+4OOfZ62nU1Ap8UafF772FTaubOMUM1LlMJQRjmXtEMgVQuVrdF3XkDJD0wSWkHiGQdjrIXQd4Wq0tgLe+O5vxdqzi/lrGwzaWxTCEigTKzNwbRtMB0FMisnMoRFkYjMIDXq9AMM0cBwbUzeIUg0yjd//k49hGzqGppBK8tnPfZ7Pfe6L5HJyheMW+MIXv4gQGo1GAwT8xV9+gpGxCRR5alCSpHzkzz7K0tIiu2YmydKUgufw6c98mk9+8m9QSqNcKbK93SVTkt/6rd9FIcmCiD/8gz/AdlwaYxO0mr3cEknPAbymaTdUnNenC9eTL2pVG3/YRyiBZlpYmkFzc5vf/h//A8sZR1QLLJ9Z4nt+6I3M7BqnNT/k5rvHKE7fxKc+/hdM7xnj0OtfzbmrPZ4/02Rx8UW++ud/yUi9jHlkL70sopf20UfGeO5Cnz3xLq4saqByBV9qxWR9h0tmheI3VRhtDWksXmGQSepYBP6Qsi9xbZPt8xtk5jWmlIOuF7jjrntJow733/9mTp0f8qlP/xljsy5LDz/Pgb3j/Mh3vJmu0FEqn6To+vXED50kSYjCGFW0yDQIoxjDsVFpBoaFVALTMUEWWO2u0RwsEXYXOPRAFavcpGzsp2F4/NhvfAvW4DTEM6xsrOJlF7j4wia79zgcani88hXQbTUYm+yxNehyy7FZ3nb3Ba5dvchkvc5t7xqy3R/jmZcqLIQGe/ftZqZaIJicZWn7IF//08eIg0voXMN1YS1e4VCxwUarw2rzCtOHjvHMyTaPzp/lh995iL/9ykmueQ3efv8UY+UyVetOCmbM504+z55DExw5UKBoONxk7uP0ma/xzNMljs/OsksL+cL8KZxddzPpztCYOcDicJM//qsP8dyXH+SRR5/m9z/9GV511034WyvcdWIPzz/tcvLpa6yfPcWqmmdt2KNilem1IwyzxYsvfpJL3ghj9iEOH95LpVxjY7uHbdoITbLWbrK6tYhjeuzGYf3sPEPDyVeurYwT91SoHR0FLeBLXzpF1YlY3HqSXfUK+3btpaIHYKa0B+sUrArf9a3fyu5nJ/ilX/0FRm++lYgynfYawu6xuS4Zm6nyp3/735GDhMZokdBsMnexiZ7qzFacGzVRGQOSoc9NM6/CLseMLF1iqaszzPpkSw63TN3FIytXeeTURbLqgGw4QzLY5tn5ee68+Q4eO7/AeqtFyduA7YSlVYHInmHuua8idPAmLEwFR6cnKKtNTm3YmI6JFsckVget1KBiZzz3/IucPFfkoc/+GE0/o+m3EYU6e8wxzjTPg/TI9ON8+lNP0xl3cQY9zKRGVdmc2lIcfN338ZOveTu/+uFfprnZZ7RcIo66YCoQametbSFTSaZeFuj8Qzuh6+4cOQf95aQzIciNo8nrbJpFudAxVuh6AZUJoixEZiGWnW9IUAYTh3bR8tucfT6gqAxedc9hNrMqmWnzuc/16aeXSR2DirOXqBcgMh0hBUUno247ZLFiemqGhZV1yuUykR+AhNTICId9omaEaRTwhwG2ZqMbEk2mJKHENCzqtRplr8TiygK2a3HTsXy78IM//p/JEJx5aZ77v+l1vOP7fphDu2f54He8m2vzV1GiSpylaIZJqiShH2K6Jpq06Pa6SN2l5w+p2Q3e8tY3s7C+zOjoCA/cegevuu2NnPzGQ/zwf/ppioUanWGbPVOTWKHErXnEqs9d79xNvTJBwdN58mtXWDyv2HdsF1Iq5i/Pcdv99/LeH7ydB//uU2AU6AdDXM9j31SJJJMMq9vMXVmC1EYocAs2GRkjoxUwJHJ9QHP5KmE0wJY2STvCEzGb1waYuoPuDDCMjFqlTDAUCAySWCGUS5bGCDNC06DgFNAR6AZo0sA2wDIjRNYm6q6xsD7AqzfAtKgUMjzPJFUhWZZg6rkfqgAKroYfhXiFEjJTKGLQhjj2KEKMEQ1jbDOPcx7zKmgU0Es9AqeHngrCzMazCuieIKpCOrC4++itXDl3gSAKQYcsixFaAkphGTmw1cUQ24Xh0CcMY9Az4nRIr5NhYJMGEQBRmiH0FCEFSZZbIK
o0txbLkhihBLqYQGKAMNANiS4EYaJAT0FahJGGwkGlXh4JLSVBbJFGJRwnJUssoiCgUTyIZIDWdZG2gW84+AFsfG0RIfN0Ma+moWET9HroBFgWBH6KYayimxpCmEiR5hjI0NDNFIY6dauOMAsMgzpRUxBXUky3BO0VXnnfG//5ASVplvsRSgjjiFKlhIZg4PcRUmGYCl1q+L5Pu9uh2qgjMo3m5ga3HJvF8ST+AJprETP7YPFKgSyz0DRFmMQEgcytE250vXLHViEvWKFQuS9XlpEFMZ5uEncH/IcPfg+90CTsboGI80SVskahUMinoVpKwXGJ0x11YZzdWM8ALxv2Ghqd9iamXUbXAgathN3HDkFplL/7608wObEPt6bR7bUwpU5XDjEMhyRJ0PUhiDqdbkSmTGyzQm/LAjVGlqW54lGlhAG54MgyKHguQhN5B6bSXOEFiOuO5qT5X3lGU64+R+S53Zmk2dzE82xMywGRYmgCXTdQStLt9lDo6AYMOj00TaAJhaFrCAVJJBmqCF3kPpyGqZNmEk0TIPKppG25JOR8ziRJcG0LyzRRmcRxy2yuLfLBD36AH/l372Fxbp54s83ZW2eIohLe1FHe/vbb8DL4pT/5GqubIc3BMt/xPb9KbxuSdJMg3cCr+IRim9ZFAyUkmuYgUxdNi3n+mafxvCIJPnFaoFS2kSpiu73N4qrLhFVhv1vk6c0lvv2d38Jma5Ev/tnfc3zfftbKEftmb+cDP/BOmhstPvX5pwjMGi/+zy/Q7LUYr5fpbWZUzFFEOKDqlNhqdbFt+8b0RdcFhm0QNQMUKXG8o1RFkWYJpsinMEqJ3AzfD9g7exv7rIjzp1+iE52gZI0wNt1grb/CR3+/zUzNwNK/Tmt1QLlcoNsJ2DNbQqYp+w5NMn92kUF7nGHHYrxk0ktMhObzefEmpsbvwp/0qR82qLS2WEwbRHg8+7Ur+Nc+zKHSAguBz9TeXehWH9UucOWls7zppttYunyO8lSVrYtLvOOtb+BN9/kMOxa/d1byxa83Ee1LRN5zGFGFI285zrnz65x6bpv3v3WCJ09+jkTzuP3uSaaPvg3bhc3BWab3v4lGUScxBPunppkoF+kPihw6cguvvecMX33sGm//pleT9jvMb61x8cwjNGmSppJj47PIyCfzUnqRYn1dEYZLyJmEWum7KLsVXNMCMgy9yMrGHKZdwvIsClGZu/fewYZ3hnY74NLSKu1ezNXFy5jVIvhdilMlpsIZllc20Owio2NHKJg1JMvUxkpcas7x15//HCOTJbbmlinW6ii2sKISbklhNKokHZ8gDnDiAv7cOpsLA0qehvYPcm0n9k4we2gPK90l4n7G6N5prp5+iYZT4EBtlF6vxeDReThQYlWZ3GHH3LnXxJ7ex2PnzlE1Il46dZLvvt9juPhF5lc32L13D1YywC0IKiXJ2qZAScl3vr2C/sSQZ05nlCfKiK4gbA/RPB3bKDHspvyLd/8YUxMu+49O8+jTFwhHapQdk/7SOp/81G+RbXeZveMeJg81KE7vZ9Pa4vzJb/CJhx/kzqPH+Zkf+nF+79d/iy9dXaRWKuIkMZElCFWKlSk0KRDX/fTkdU7ky8Ayr6k74gPNAKWQKrnxfq7uTtGMgCzWSGIbpRIkIaZVIIlNwm5AacTCO7KPVpDx+gnFZGmGlfWAytQIpUpIN7pCw6phhT32jrisbcZYBYHfibjn7hOoKCXyJeXRCda7PRw9Y3R8hM2lLRAJlfI4wXCAbuT8NElKrA1ykY3l0u102D07gwA0peN3QwbbPcKez8/96x/FsEzi1OMvfvN3c9sVAd3BHE7BIU3kzvspmqFDDL3ugKJbRgkd2xFkSiPodjn55BlSYbC1GrL0UsDXv7pIlF7i+Kv2ErVTWvND0mSIgcDRRnnLG99E4cAWiYqpNgoc8Uu0NraoWbtYXL8Ghs3C3BX+8wdPsueO3YxOCjK/S2l6hKXthDSLGTQLREGbSlGgaXGe357atFZzpbpTMTDTMnvHbiaMeqCK7DlsEIsrbK01OXSoxubqkO3tJqZWx3JA6SmalQ9MvIJBFGxj2YKh3yfLAoJAYegGIg0h9ch8cNwUxw6JY4twqJElkum9exk2r5ImAY5TptsVpJnEdcsYukmvFWE7ErIKaZIPQNxiCdvOqNRyzq6mUkKp4zaqWCogHmYMNgeYA7ArDrNH7ubWY0c4f+oMuqWjslyEYvwD2oYkJo4UUlpohobtOkShxLQMDFMgA5+gG5DJCNsoE4VgW0XSNETPJGmc4Bg6SQrN/luRauQfQSmh5ak4pp2LkoUSSCVRIcTqemKOQNNM/GGKyjI0NNZWFSqr5rDNVzkeuA4Xdo5Ok//lNfjfv/YP31vO88ny7zuwuCGxrQFFu01ivPh/hwr/l+OfrvKOQqI4xSl4FEpFkmGAEoJE0zCR+LpCaYosTbFtk+Gwj2nalEpFMi0DpWOagqVrGXfco3PzPRHPfjnGtEEKiziK0YSeT+rUTiQRL/ugFS2XNFaYwsRyBEkicUoVtrvrCEejMAYFy0OlebechALTsNHMlDiNdgrgy2rFG/xMLRe6xEbuw5ilMQIoVws4uDz4kb9gbHqUZ598npsfuIXx2SmGQYBu+wS+Q9kqkwURytwmDlOsgk0UbgMuyAqZ0tCMFL/bQ2ZBfhMUiviBT8lz85zanRX/9UOhbgBLccNEGECiJDi2wdTkaD76T3ziOCFNczsky7KoVSpoQjFaKzNad9BE3u2lWYYiP8UaEj+ArcxE0/Sco6pbpJlBImMyJTFtizhNQGakWYxlFxCZIgiG1CpVHv3ak3z7e+4l7m5jj9U5PH6Mh59v8vRHH2Rtq8+gp3joUw9y9GYDzRWsffZ/EpDR2DvDRHWagXQxjIxKmBFrOloUYBAjU0Vs+mRpDz2xqeh94nnYDLfxTJNCocKVbYk9MUZZhJhkvOWtd/HqV43y2KMxb7/rXvzU5Nve+TaeP7nJ3zw6T3+zjZlpmFHM3LmLZJZOFhrs3XcUXwvJsowgeNmgWUqJSR6DlakUS3Ox0Xc88jKEVICFZoCtGwxFyMSY4rb993L/wVvopDHPP75GxahjNXaRWjanenu5+6a3UTAvUNoVkz7+OMoUDFqbLF8WrGcGk+MaDc+lk4LqtpCDvTx0+Ra0YspeU0Mvmrxr4VnczOR/dkps6RWmpu7m2mCGqRMuxYrG3CWfsPg8sT7G0dtexbGbj/Hnp08zdkDRj1x+8RMnqY3fzv7xHp2NDfRazHorJS0LRJRSMTSWW2sce8/7WZRlbj9yhHd977s5++xZDuy/G8d0cMujmKYGMuJ3f+tjxOMl5q6dh4JNd6DzpUf+hmunnuGpjzzM6uQ8c+ESKo5oTFd58fRVPK2G6+iMOjW2t7sk9BiZOML45DRx3EMjTzvJshhLs7m7uptPX3iGaqPJlfl5fFzqUxbjs0UqmcmMu4ezG/O89s4jXNpaYWJmP5nZ4tkrTRIhOHb4Nr7y5CM89NxDfOTv/wADj/pIgc3L86S0qU3s4fKleUZKNtunL
9MdJDQaJklvi27YxlYxjUqRrX7rxn3aOmfhjRpMT0ySdtYIbcGe0RrdzoDF/ha9uMj3f5vDly4usHZRUt8bMzaWcWX+PCvLNcb2mOypGmyLUSb2mVQmfLSoRy/qoUmXCbvEqtpG6w9ouwXe+gBsnbO4sBBTrGrU6jbtXh+rrGObPlv9y6w2farrY2hSsGvfKMFFn1RrI7oBCIPm1XmaC2tUju3iu977rQTtDBWd5tlnPsPoeIEHv/o1vud7v5uPf/RjVMoVlIwwdLHj+GCiiZdB5PVmPPdSvK4Elf9ocpkrWK+rvjWyVNvJMAZNT8gyBapCt92nVI954C0HaAY+S+shR/aX+aZ3vJMXr36DNPDYO+nx4ONL1EeP4EifmdIUd03PcDrtIQqT7Dnm0WnGKFNnte9TO1Dj/vtexR/+0e/y2je+if1HbmL76jJPPPYk5bKHJQxSJYhSsMwCSSaIsgDdlBgmZCqfqgoBtmFSrFZQZohKYlwRoMUxaeJjuCUuS5PWMEPLFCrOt0iDvk8UhyAkKlP0+31kFlI2S6TDTUSviSEkUvaZ24hx7t2FcdBh3Cyz+mKL0UoNz9AYP1RlYq+JVR7nK8+dZXJqhEuXLtDaDHnX976F0fI489e2uPDikPPnO3iihi1rROsDgtCGQo/1Cwleo0R/s0XFq5AFCYZRJo0zMhljJKCUznAokGmb4XJKZmyjaUUmJ/fgN1uUbI/QN5CZRbFcQBMmg4FPrV6kue0DCQrJ1K4idjGFSMOrVzFMj6mRWaLERxh1JvcdwDFTWp2AQsFE10Jk4jFSHGfdnCcMFUpAnPXxPJcgSBAqRco+vm+iUcCrWaQyYHzao709RKNE5Ke4XgmhSQxNEA9T+kPBq1/9Oo4eOszrvumdzDU7nHv8YbIsw3McBoMIXelkmSBNFK4jkCpEszy80giDZB2rPMQq1rHMiPWNDpoucYwCbbmJjHx0YZImQzQzRWU6WZKCVChGUGrkH1FEBIJMSkxH7Phc7zRkSuVuKyJ3nhGajuWY+H6ygwbVDlzTd7DAzj22c9ddxwv/d6DxH74u/tFX6saXOfTK72cpNZTSCOMa3V7h/wka/qPj/wOgDHCUgYoSEhUz7PZwKiV2HdzHsD/AHSlixtBb3iZMYqqVGsMoxil4hDLEGoKpKY7driH0jP3HLC6/kNHfckjTGMcxEEJHyZTrZq9ZJlEqL0axijFNFynTPLPaNghkhO0YSC1GC1wCP8Y0847TsBMypSDJZfpoMTLLf6lJknfNigwhFKapIxOFaXpoRkSUOuw+cpQrFzZ4z7d+M3ajwsqV85w58xK7DuxD2FUyNaRYLjEMhhSKE1hIhkmE0mN06RJlpZyMa2iEQUTBfgUn7nVpbvlcvLrKvfc/AFHAsYOjFKo1XNPBthxcu4ime1hemSRL8pvdkAjlAD66KCNEikoEQsswDYVhOGQqQDcK1Mc8trYGhNEAgJJXIU1jzMwhVQH9YQfLAttMWFnS+NjH26ysrt8AtJnKxVCDwYBKpYJlWbnRsdTzGycdIHQLxzTYbDW5cvkiBR02rl2B6TewtTbPE198jMtnr7HZ3uTOfWOMVWssPxNy19tPoKc+X330eSrVIrY08BMdWyXYmkWm24RiHUOrIYIaptGnVCqxsd7DjDZ55d5Rnj8/R3GmwnitQSBTqlWTr3z1EZ46W2W0MsndJx5geXOT7fYUv/ynX+dv/+iv8NNN3GJMc7NHwU655VUHuHrlLJurGbafkvg90CW6MEjTLM8bNm3iKCUMc7CpWRaRJpBmbkqbhBJdyzkyIRmWZbG51GTZm6PZCSnXXIp2RKlQRfYGaCKmVjQ5deEySaZz9/5XUmxcJU49rGqNrFJmOPcFNNNgEPcYt6okQZfnGv+a2kKE9FvsOuxzdP9eLk8c4/wjj2P1NkiDMu1ijVDXWHxkg7IZEyQxyUgXbTDC5bNDXvH6Q5z6w88z8LZZ3tJInNczVjoO9kl2HRVUpvdx+9ZZnj3zJPPLfX7otttYLI7wA9/20zhTBtcWzmHJiJmDN/Oxz3yCSytXeP6FRxgZmWXXyF5Gxhp0X+gzNj1Ko1bjIxsajcJBJqZmUfsrrJxOGGZFkCWGmwnj5b2srvisd8DRDSq7Rrlpn82p530eevgFmtsGw26drt+jUrbpbrr85dwqUo0x6nqEwwB/YBJub1Er7qfcOIov1gikzdb2MSarNxG427Q3ezz91AWunmrSbV1jO+tSbUyhfIlpV8nSMUbqRUIVM2xaiLhAc8vFc2qIZECnJVFRCSNrYNrbpMkYjmbfqIn1+igb/W+wudxFDJpM7Jsg6mYUzXHOb69Qrk1xyRlhmIS47pBe3KSU9nG7bRpGHVHax7PnTrIpBY2llGFXcOToBK2NHhVnAhnHxAPYc0KRxT6+b/BDP2LwxGOrXLzmsCSLFPZ4rF9bp+BZWMLAKdcZBCm9OOILj6zjeDbH33o377j1fsykw3a/yWB7ns8//AI/+9sf5r5X3M7Y/tuwx9Z5+Ktn+Js3XOY97/se/vzjH0NaAVZqEIWCFBCmhpISTRf8Y/7kP+RUZvnTaSff23EspExzo/MsQ4kEMCgWC3S7PaJAI00Vt959iPEZRaAbVGsunWGLoDXGz/3i4/TRuHjyQQ7MHOKRxYvsGr+T3SNlXn/HNF/7u69y0+HXMJQreFLDGy3z0POPcbW1wbX2FpPVcWbHdzM1OoVZKRP1Iv7lD38/X/vyw8xdnqNSKUEao+GgkhBluBi6jmWVGERDNMuj6JSIE0XJK+DaYxhlhySJUSTIuEupUSXa2qKfBNRMAylj0AS1kQbtdhvLFgipMTk5TScJKHs1xvfP4NlVHNMg6LQwjhTYfV+FLz72PNtbIccPzOIes7HdlDjt8vUrX0RJj9HxBs31TcZKDquXMl64dBLbgeEyLFzuc++9hxkMVzj71AU02UWrTKIXJHLQpeP7GFlMlqa4Zoko0dD03KDetlxkZqLUAN02IAuQFBGGyaDl45nThNGAxasJtWqFVmuDchUqtQbdfotMGlSqFQbdNnFkIVzYNVnn7te8FtOqYCJZ71wGLeTo3dP4TYc0XSGM26SZSZaFeHaBJFZY+ghBqFEZLVAfcTj93DrOeA3L1hj2wPZC3HKJXs+jXPe4cnmBod9kYnwMQ4nczH5QRE928VP/+T/wqtfczhNPPcknHnqRsXGbixeuYOgmKA3LsAmDAYZpYBkaSSrQbZ1yo8y1laswWGfPsRIJHQxN586RBxiZrnD+8RaeaSF1E5mFuCWBij10zdiJdhbI9H+121FKoRsahglplHu3SqV2sMLOBDuTxJGCMAUJGjpK7CQkqfxJveMilWPBfPaGuo4NNW5wT18GnzvJViLfeLAjxP3Hx84HCchphxKlCdp9+U+Fif8fbIPiGCUzhsOAOImplCscPHyYbb+P6TokQYjf8fGHQ3Rdp9qoYwwDkiCkaJYIgoyFJcFNt5lcPZOhGTHlqs2wleUXdQZpmpKkuUpQE+aN7hcEQkl0kZImMQgN29YJ4hAtNbHSGrHR
xDGtHe5bDgx0XcMwAJEgyO0sAKIoevkU7igVDeGQyIhhElOrVjj9jWtkVoZZdSnYDtWpcfpffpIvf/rvyIwiVhoRK5PEATIL23QRtoGp6YyUSpiGi8wEMo1IghCndB/nXnIRCFS4xXNfNTEtgwunNTQtRWMIDIBtEDqG6WKaNtfPQA6yQbG183PnhsE5+f36/0Yis3yqJjQttwNSGyDUDneUfLWt62hCoQsdFGSZBJHzA00798eK0uSGOCUIgp11VZav1TFIU8XAD1nbWOPE4T3Yld0sXXiOwcLXuX2fwfL2BmNjGUEnJJEVjOmIo4dvp2R6PP/kCq2VFqllIaOEhJ3NPopMKmyvi+WmyNBgrblIv93hg+94DXtecwfh//w7Lp1+AW16kpnREqpmEagGTpTx4jOnOXzr7Tz56CO89OQio40GxXGTulNkOBBgWNzxiqMcPno7jmgwf/UhUlKkO4phhAglMYxcXKDrOkmmEHquuNMRqDjD0HWSNINUIjWJEBAMfDRdp91tsdZuM+xt4Wl1BkHI3NZVNBHQHbSQfRthKAzH4bd/9ffZ2F7glulRpFZCm/pmzn/xUebW2thmmV5X8PT5Mi+mY9z3ylEK2Dw73E3QUnzt78/gXxtw770z3Lp7L2k/II4TwlGDgqGTZpCN7yYY6zKIzuMZu5ixDZYGm3jOJM1uh+2zZ5HeNdZDmOo8wMHDk7zpviKfPvsFFge72c58DK4wMbmbc6srLEav4ZkHn+LyyudZCXp8/FMfJ9C2ublxgOrUNN32EsP+kP/0Mxd55tkqm5uvod+3Wd3exeJqEaVyBX2aGbTIbWdIJUGiyHyD1lVFmsLHPtIj9GcYDutIFJZp4PszJGmMoetcXdfJlEJleSSpHNo8uekySHaRidv5ylVBpgRpUkCqDEMcp5NU+NgfRgwGLqZropv7sTQdvw9STZMmIV3LJE7iPJFEy0GOVJIhoClQQjFcNzCtl0vmVS1FNwvYlb+gPjWJjBIUDh3ZYv3SgIMHE9yR/dRqNeRKn4JTJC4mqNE9dLeG9ObPMTfoo18uMTFrE2tbdKJZTGeCJG3haBPM2g4NxyAxd7O1dA3N0rj77jpve43Og48nfPXFTaaKBdqhACsijUFTCaOewcVLzyIEeKVJOpOHuevOe7l7j43qr/D+d76WZ08+xUbWZWLvLTTbo3xj8CLf829+mPvv2k9pZAzTD8g0MCwDUyjS5B+n5Nx4NuxMLHOjc3UjIMEwTBzHyQ27RW54bpLbn0VRhCYsbM/kJ37sX3H06HE+8YkHKZpFbp6aZvHiSWQlYXU15OLcHGu9OVb9FH1ijEHzAq14midMWIwiaLfQZYFINQmLQ0w3YXaqDIbg8PFj/MsP/itOn1xk/doWpd2voTY1yw/+xNv53d/4FRavLFN0HdIsQBO5wXmWJgRxhSAysC2bIAzo+QVUYiHjANNSKC1DSZ0wLLDVzlDRDLZoInHQjJRWt8TSpkEcl1GDGJWCMExK7jHSLGZ9yyPTJUJTJEGDqu1w4UyHIJygUa/Q8zPaiWDPnr2ce/45gjCnMNUKRdKeh1Wssmf3JZLYo9tbZ++xMjEDZu9zWT9fZHSzz8Gjs8xdVGh2QmoVkcMY6RQR5M/JYtkl9CW220BHoZspiaxjCCfXFugOputwaWWDxlRG0RilOjCwLI9KeYLeoEkUKjIUxXKVQb9LltkM+hnFUQfDGaPbaZKFbWrljE7UJeq22W7WiHohve46nttAWDmoX9lcIk5CNF1HtzQMW2K6Ibv21jCkQ7enUyhKFBqVEQhli1g2yTLFzMwMItPpx30yLJSQjM6O8Phzizz6ZJP775zi4K6YLzz593RWT+PYKl+RCxvTkMRZHr2sm4okNcl0n9GpMZq+4PKVBbyKjiWqzE6mXFx/DKPcIekOSBKPqb01/KBDsGViFGPiNB9YSSWRO9xioWlk2e+iiQzdEESRRMZgGmLHVN3EsBUSRSozNEMjTXa8XIWOYQjSDJTKgxWEJkBtcn2mqCQ3MsKVEghNQxM5RS5HmyBlRpa+HB37vx4FoIBCAyWIU0WcKvzN/j8VJv7TAaVlWWjoZH5CsVLFqpXYHrQRCsqOzebGAOIMlcVUxkbA0BnKlMmRSQRt6qMgcFFpRGNCEPoabiVB6Qa65hGGPSy7gCbsfJuvFLqRp7b0+0NAYBgCzXQQIh9PZ4mB0CSp6qJLG0EuqND0HDhdX3Pn6YkKXXlI6edASosoFSqkyYA4kijdJ4thvDZOsTzDe775DVy59hKLVxc5cOIB+qvb/PJvfoi9u+tEw0GeRBMMCKKYONIJ4oQka+O4Gl966HG+8vA3SNNpXMemXHJJ0hRXQZYlFAoeuq5h2zbVaoU0SREivzCkkqAgTXPT4DwGUu3wLXMOZh7ZlKdI5J3J9RW5QNPJNds7Ku38KhN5NjgKTRjsTNhJFfhhjMzyq9HQJUJpGIZAyGzHb87G0HRECoaRoBkWKItIJhQth7m5LUxN4Pd7JGGJb37gBNeuhZx66u/oX9QRu1xc6zjtjT5fufwojqqyd0JntCGI0xTHECh9SJpU6fcHFCsOTz19ktW2iURw7MRuwu46D526yJ3ZCN2NAa9/zQnKBQOpLC5dW2TYz9gOAqQW8eu/8EugGTg1jXV/m+xKAaTCsAqkWcDV+RF0a4WlzU1sx6BYdTGyCGKbTOvnXFZpIkmxbAOZ5ikHSZJSN3U0w8URIUmWENsBjlNHVMZYtGw8Q6dqg1ctEhmQ0kPrTaCkzqDlo8x1avVxvviph9AxGDGKnDx7mhN3nODaixcJ4zt55tyTOG6JYST5xNe63HnvQ7zvju/n0oUhH/6dD6EXDarFASMzJd75bXeTBV0MCihRhPoIVlbGUE1Cu4Tfybhvr81n/uBPWB+2EFYJMwLPSTC9IqGQaMMFms/8JNtzr+LSLGjFCb7x3JMU61Ve9c138cLyJmPlGi+e+jvOPdri4OvvQ7Sfxi7pVMU0//pfv5enzn2Wfqpo9cu88OImUupomkkQKvqDEJnopFLtFDqIUw0NkQcXkJPZRSYwhCANIQpTsiwHKWmmk0mBrnlI8rWQknm3niQxUSAJGLBjAUG6MxzTNROkhuG4BJFJHGWUvDGCMGDYj+hmAYZuYRgmMtMZ+hkaOpqRT6AhRaCjsvyBqSGIE/mPCrLQDBQT7Dv+FvrDBS7Ohwh7DoMijYLHhTNrbFxN6Q9ajE0UaRsV/vyMTbuZUh4psHX1GodGxhmbqvDSIKE8OkGo1plbBeVaLF3bZNQu8+hSneW1AXfdMcHukTJrnRWuLJjce3+RTz3Xwu/4eEUX03CQBKhUoEKLilMg1RPipMvv/97v8cfZb2FYIyTC5wd/4H3cf/ddvHpiL1HmMjm5lx+w7mL1WpPucIn/8HjKhm9hmOkOmNTQjet+EPKGDQrk4DFLr6+6bTI1xHFqCFKmdpU5dzrAMCzCcJjXdc9DGIL+dp93fsvbmdr3AF9/7hLtJKLSK7JZSkijAY6+j2wkQrZbvO7O17DVk3RFxuyecdZPLTF4aZ2
9x4/T9SVZe5uaCikMBFPlOgOngmaMMtzs8Ys/e5lOT0fJGpns4HgRStMwtHeiyY18ApumSJU37RqKuSsOicx9OGWasdgCyzJQO79+TdcQ10dCmiAIw3yytHNttGKTQcciy5IbQiVghy+n8iGuzG4sHcWKQOiCOE4QAtIsRdd1zhk6WfaGHWGUQeuahmUbXDslEdxE6EcUiz2K6d+zZ6xAa7VHz4+4600F0IrElyLssEhncwndcEmymJItsAsaiZ0xscvBXzdor6WEymfP4V2MNBRJ4rC4tU7P7zM2UqFc6HH+pS32ztzGzESdJA3oBC3afpuRag2/2yFNQhzDROoeVauMH6xhuwnbvWU2Nzu4BYVdyKhULeKRgMYej/56QGs5xHMqrK0v50OYJKK8V0PUTbSaSUVCdymjNlqmO8yQKqKb9khdh812k821bcYmSoRxQhhHFM0SI+UGzdYmXDxFRYv5wpeuUpqdwDsyZG5zE0foeUqcChBoWIZJmkqEYaBFAdXSbvYcupVSc55w8Dy95T5rWcyw+XUMR8OzHab3T7DVCejt6rCxOGBml4NejOjGIWGQEkbk03wdDEshwxjdUDnNOFHo2s51RIJAoumSJFHomkDTNRIgS/INqqZlGEKQ3Cg/ig9+8APcfc/d2LbNF77wBf72b/8W07T4yZ/4CXbt2sWlS5f50O/8DlJKsizJJ5PA+973Pu5/4AF0TePnfv7nWVtdRWiCH/rB7+LmW+7Ati2+8IVH+ZtPPsjs7BS/8PM//k+FiYjr64r/t2Pk1kPK7wzAz7tUt1zEdG00TWN1ZQVTVxgobGzwPAZhQHm0wmxtmpnZbSb3naY6AkK6SD1gaxlmZgVf+Eudqy/JPCPUFBSLTp6FCaSJQGY6UZihGXnWd5JkN6x/oijKfROVxLOvqy9zQKlUXvSuh7ILXUPJNJ9ExgqM9s5UTCdNM2SUG6ofvnUM15lgrHqU/nCTVr9NakiCSHLrzfeRBJJMxJi2hWEWEIYJhqRg5lO7olskCge88MJzXDp1CFOfRNd1Ou0urucRJwmu4+J6BTKZoaRC1w3QdqalmgEiv9jyqDLYMVFC0zWUkmgCFNdH1rk6XO0sra8LfYR2vVKpnVl4PsnME4h2SMGmhZSKXr9Nc/sCI2NfIZUJpmnSbjfxPI9SqYI/CIjTiCxTGHpGsVBHCJ2lhYv8+I//JP/i/e9lYXEFRUoaNil4dZIso5N2GPM8PvaXj1Oolrjzpl0MBx0OTE2DCvDDGFNAlIEyfGSSR2F2Ox3Onb6EaercduImhoOUp55/nnJpmpHxBhMzVQbdAMe00fWYwaDD3BWfKJL4fkqaKsI4pt8bkCmFlBmJikhSyJS2YzQfE8UxB/Yd4O677qE/9JEqAmWQJgLdVBiWRbPV5RvPPYVXaFCcHEHFGeH6OppbxykX6SwuIyyfhavrHDq4n34Y0B10aRQqdId96mN1VufmOXTLMTQBza0t5q6s8/73fwdPPPEYvc6QQtHk8PFpzp9Z4urVy4yNTZLGMevrGzRGjiPtKiU7pFwWdP1ZMsuls3mGyYOH0RyJGUukjBiKGDNLMWMHaVpoCG49WObpR16iP1aGrIkMZ9DSGENJttRVXnP7QcYyg4cfu0T52M2sDK9wwtCJMp9e/xKBO0sab3DLLbfwLR/47/zc7z+PmX6WY/Z55KDEXW94M73tVT75+a/SkD/Mtbn8LszS3KvQtHL/wRvZz9fXL5q4cX0rlV+3mtB2soqzl9eoIuccvdw0Xa9c/1cuEFzvlHI6y/UPFjc4fZrQdxq2fILPP1jXqh0CktppZv+3ZCTxv/lWCCxnSKYCsjTDcTtMH/giYdRjpDLGZm+A6ShG6wW2B32GfYORkSmUEtTrHqsrC2jCYuhHBEmMhk+lVKZc14miiOmpUZpbCZurMbN7dd7/yr2gdzhzrsUtxyb57T++yutva/C5hy6y3FdYJYtCCZRfQEZ9DMsmEQGm1sAWQ+IsJE01uu3cTaPg1Ul1wbFb7+KeV9zJ1N7j3H1PhV/5b/+Rx774AvVKnSDqE0tQuoEhcwsUeDkNJz8NecOrlMB2JWEgqdcKHD4+xsln5vK0syRAoeNISJwifnvIrbe/gt3HjxJ0t7i42OZdb30VL77wEltun73GCNuDJt2NFo3RIrUxna0oxeiFBGubVOsjbKZFnOokaesah4oVYtHlykabZjrE1cFJxnjpzGsRWl5blQCvWCSMIizLRqYp/nCAIn+4s0OxMoz8WtF1jThO0NEwNIM0ky9H1GovA0WlFGgi32boYierWZHEMYZhkaX5ZOq6ZycoskxiGgbsiEEUMo/9VfnFnaQhktxOqVwqMOwP0GwDKTPShFxsqQlkBiNHP8GBm2x6c0N83WD24CiaH8IwYWGpw+zucQp6id7KNk+dWmfswDjClYTJENPIaK2E3Hx0F+O7q3keua6x1VrBNqu0e20cW+EPM8KhwXh1kqAX0Wp2UTLBNS1iH6IkJNM0hLA5dOw4TrXDdmuZ4SBBSYOpmQr9oEMoIFYRphJoXYEINJLUIpNFultrpMrh+H01emKbTEk84dJeHNCaN2jsLjC1KyUgYX4+5tj0MZrzbTKZoKROmhWoFCdRVko0yDCsIm5BY3OphV4T1PY6LD53HkfpYLmkWUQaZqBlCM1ATxShgtHpKrq1n/LkGu1hh6DrEYd9ECaKjD3HZuj3h0TbQwq1jE6gUXIBw8DwIVhyqNYOc/HqLEqkKClIk9/AtpKdxhhIRf7cN/Tcqg8JEpI4Ai0Hk7qhY5j53C9OIA3KNzQWhtEm29nofuxjH+O7P/i9PHD/fRw+fJg//tM/5Sf+/b/ns5/9LGfOnNkRAsGhgwd5y1vewm996EM7Fy43rudKcYxuHzRd52Mf+w3e/y/+HaZhYFoh/d76/6X6/e+Pf/KEUg4CjAQGQcDo2BiNiTGWN1dRAupTozTKDbY3l+hvNrFsgWVJ0qjPxdPnqdol5iKLOIlBRf8nbf8dZllR7n3jn6paaafenaa7J88wA0hOkhQVAUHFhHjUE8SsRzAiwXiOAZWMigoqHPWYFTARlaAkyZkBJsfOcaeVq35/1OoeUJ/n9f1d17u8ZDrs3nGtu+66v4la3aU9o5ge1khdphS0iYAoTvF9m6WdZRmuU6YdR5QqFaSEKIoWEhjy3BRmuTmlIEDrDCGEnSppy+MRRXGwELpEC5cwaiFVgjQKJRVGK6RKmAtdjnndUl72msMZGZ7EYY4hr4JfKqFza58zPfE0blXTadukG5Mq8rbd3TZigcZjewi5yVg8UGHRCY8xvONR2i3Nqr2rdEJrEJonOdPTDcpBQBrbnWiUGVynghYSk0vm5kIC30G5DgZB4GjSTCKVAwp0qhlYtBjXL+EHHsbzabXngJxS0IPrGMgcPD9ACgsxBn4N4eRIaTPKpRK0Zw37H7qcP/7hD2zcbJXw81ZBeW4XV40hyTPKgULoKlGnQbXSi1PymI1SntnaYPu0bfjrpbWEufUYk9UhZLmbjnqU1SuXcOQr30icxci0wlxrDE
+WMYmm6rlUgxIzM3M4rmbQlRx4XBUpJTMzMyx2Kxzx+tMIQwijFjgpUrg4bgW05MuffYQdW9q2ySj4H7bgiwXlnizIA4Z5rokm8GHz1pTHnmgUi4JnC3pxzlvvvAqt9lEI6SCfsYTrPOlBeT5plqHMfmih0UnCgw9DpVxGCUgbPlEcMT0paLXWMv4niee7lMv70GrMct21ik77cDrtDl3VCp1GnTTfn6WLT2Z0dAzH9dlzbT9JHNpJe6bY+MwkSWp5xUMDxzL8REZiXFzPQvSdVhOHCKEBZUgTw44nHHR2COmEBrkKoyWB49LJMzBLeaRZo+R4hO2lyGcrqKibDcpFOR6Lh15Fd//t9PWs5A1Hv4rjjlrBD790KYt6Kxy017F4nseubVP8/prbGdjzRUxusEVdSlVYUF1CmswXLBDsJv4s0FnEvODM8oKkdik6E/szO8xBObIQl72gk/x7NaOwG6pc5wuNoRWP2CHm/J+Wax8HjPW5FSCNhaWiOLKiEW1V/VmekWUpKPseC2wTkcalhcfM8gyDbdBMNsDcrgqmOyRsa8bHpqgM9HDY4AHMpk/TFYeMbd9Cq6Hx9l1NMpcQhRHKzXARZJlgRsTkWYCOXYadFlnHwRMRW7bPct39FUpemzzRDN82RTSTMOjGnP6vy3lgS5PND83x7NYU0xNSqyic0EMagyanIRI85RH4Cn9AkOQhgauJwjmefvBmnnjoVtzyKl760v0YH0vxSj6pSZCOi28M7TAqIDQW/HvnIXAhBBpbu40xKGFRrdmZJsoBXfDhdQEVl33NXJ5x7PEv57CXrmX9k8/Ss2KahHH6BlzGG3bzumgxBEISVDOQU6hWDn4/cV8Pk06DTnOc3r6EnclGdugVTKdt5jpN3FKNjoJqd41SxSNJcnJsZG+jOYcSkmYc4bnWTo4i+ccKEiDXGikEWWrr8wL3qEB8sjzHKWzWhCmQMG0wOiVNdgs/ZfF+ea6P57kI1ykEGwJPSnJtbfEQBlNMLHWBUrpeDdDUqtBqNjBCEoUpUtjnaEWkGVIq+rr24Il7n2Tlvr3UuwQ9lR7cfJrexUsZWDtKvauf1pYqq1/aQ2VZiac2zhETcvBh+yJ0SLY/9CyuEszmPHD/Y7ilPtIkQ8g20vUY39Fkz5V1kq4M7e3CqST0lcqYqEJ37yBbdgyzdEmVRdU+ppuT+JWAMMqZnpmj5PfiduU04pDmbE6WCoRwkCVIVIKqK6peianRhDi1riNjm0KU59C1KGVschrH9FAbajKwukK7KZhrlxgcqqKznEU9Q3TiFlu2HEEU15mZcrHkwxJBvQunnTM5PUJX3sv0aJOotS8tA0KqwgvSPI+DaK0Fd2yUlKt1hrfuTRonGF1wHTW4JYd1ozlJZIWrUmrSWDPt2FVGIhE57NglyPOS5TQawNQI04xzPvVe9li9mjzTnPe18zjy8KM46cSTKJVKXHvdddx4y4286z9OY8nipfT01Ln6f6/m9a9+A+dfcDXGzG/mIE1sctOyZcuYnp5maHCAfffdj0cffYzFg4Ns3LiRww8/giefXEdfby+u6/C6170Oz/P43ne/y3PPPcdll32zCJGBuSZoI/Ecj127xtA5xHlGHP9/EL0YNlo4foBT9qkM9DAXt+gf6COLYnSSMdeaJYpTBgaX0kxTSkFAksX0dSsOPGAJee8mcg1J6CGEZmsYUanD2gMzRnb6BEJiyOm0EyqVKlL41ptSSlzXRhdKaRtNpWye6Txf0hgDQhc7PWsD4LpOwS213J4sy/E8F8f1yHPLaXGqCUZXGd8pWb0vHHJMP9t3brZGp6LNTCvFTSCQVVKjkHVNuRxQyodwhY8UArfI4nQKKD6OQnzfRQJJFHL0qyo4jiIMIxynz04oy4FtADqd3Vm4uU+aS5rtOVw3YNXSAyyUlNQIWx5SzBClihyHXKcM9S3iicef5Z5772dk2xR7L3Y49KC9KHdFNNs7MaSI3AM9H5Vmm+1mq1OomO0iWOnVTEz00ImbpGm8kJRTKpWYm5ujUqkUJvA2Lsov1OZpGlMu1bn/oT/z7NYniOcaQBXHCBAZkgpTYUhd+ex9+N785S/3c/2v/0S1q5v2zBRxNEOauThJBk5EJ1E0Z0OCIMCQMDBUJ4o6GO1Qq/bj1EMqXg/NmVkCr7A/0opWVGPL+kMLVdo8/dhYiH+BgwuYgnPKfNEoTNxdQbnsk+dm4WemaEizPC7OOUu3SKPc5gY7JXSmkVoilcJkGTqXSNfB4JIbQZpbI9wg8JGk4Ehcx0OJMr6b0WpFaCNQbgAyIMkUcazJc4c89xBS0okERnvF81f0LeoBkWG0oV4r045n0HGOYyRhM8GG0gtK5RJaGnJjIZaKp8ilZK4d0dddoux7gGRsegaT5qS6w+BAF16pwtzOFrXuHgLfI0sEe644lJNO2JOAGndcfwvJzOOsw+Mjx36amQnNiw8QrFzkc83tOwj8EkrlhZhOF5xfjZDKLoILVI0XHkJgm1BhmzrlqIXpobaqPGwEqV08f/yTy/jNb27h2utu4egjD+G0d74ZjGHnrlG+ct63LJQo59XE9nHPOfuD1OtdfO5zFwMGRVLw/awNl5YSgaJc9hBSLPiyGgFJGtOcmyHXGcK4xedRev6wFCk8MpFiTM6yoR7WTe1EacWinh5GZlIee+JZQjo4pkUUd9BOwPD4GAJNJ+6QRx2UEnT5XbTDmFxLSF06MzFlXxFGGfWBQdphRJRkNCZz3vm247nr8e8yVz+Mev80U080edublzCRdPjrUwEP3r+Tnj6NKnWhk1lE7pNkGp0atEjBhXYa47tVai7EIsfvbOHu329DlXyEjxVACoFOE+qlMmEaLwhxrPJbPU/tDY5SKAGZSGm3W1RqtikyRuB5imaYU+ryaDdy1uy/knJfizvvvBnhuyxZUkXHMbWeCG9O2AYqmaJnoEIrjJiZMExNj9PVJ6j318iziD322JfGTMjg4gG6aiUmNs+y955L2TW+HaFLJLENv5WORGcpcfxdpNIgJFJYiFHMDx9ygxELnQVZrlHSQWA3SY7joE2MwPoNWp6abTznT2Rjcuu7qW3D4jg2jAIknSgjb+9OgFsQShRz+732ew2RchCkONJDOLaBLPsVDlni86cbf4fbBd1dJUZ3ulTcU0mzkDRN2frMLvp6NHoyYfuWBjP9HYxu428bpad7CUJOQjLLxmEY3LvOIpXQGS+RzaWYIGR4ok07VNTNLGv3HCIMBeVSH9MTbXI6HHXC4ezaspVWu0NJDZBEu4ijjMaUS1YZo29NhshLGC2YnJmmVJlDG0ljNqW21GN4xyROLaVvUQk97ZB2NOVyhQRB1AlJGlMEpoyvDMJTdA04pGmHNKnatB0TEuhF7NoxRWdaoPwKRs6wZtkQU3Mt4iQgy/qRIifPU3tV5hHNyRBjBI5wiDpNjLS2gfPDpvkJnSnqkBEgtEA6EMZNhHLwfEUS5+hcI5AIY/2xncKAPU0MRkt0AmmWI5W22d3CxXFc8kKUBnDMMUeRZ/CRj38ME0uEI/nzn+/lpptvQwrFF
d/5Otff+EeMUYyNTnDeV8/HcR2+/tz3MLn7d7Xz7LM/yStf+Qp+9atr2L59F9VqlcmJSbZv38XQ0HaOO+4VC2tgmuY4jkuaZnzgA6fz4Q9/iOOOeyW33nq7rcFK8dEPv4vjj38J1157M7tV4P8cig3/b0Q5soeS30dvf50slXZXnjl0Gi2aMw10J6R/cA3KBCgTohMXnWX4tYCdk23q/hJKlRzluGhSlqyo4AVtunsd3HJGa8TH831inSBFDddTzMxO47iSdidH4Ba7XMsDtIKUHFBkmQFhBRNZaixMhkOeZSjH8uBAMjvXxHVKuO4i8jwljRVzjYw9XjTI696+D7OTOXk235JYs1KTBqRC01droDxDnqZkJkM5EoyDzjLSKEbrjDwHx/VptjKUa3crs+MhQrmYeHchlo4iSezO13Wtma+OQ+I8I6g4zDZAZwNs2rqNVmsWIyRCZyjXwXFgoK+L9c+u55qf/hpUhluC+3c12Lhthvd//FiqS8uAxHFd8izDUSWkA3HSoJ71UQoc8jxFGgedd+jvW85D900UUI9DGMakaUwQBAuLhiOAvIIpxeSxwpOKKMw46cSXsGJvSbMxi8HFZBLfs4W3qSN6vW4eeqrBHksrrD6mzNT0LBWvik7r1OvdVCWkeYbrGPxSiUazTZylLFm5hEanQZhmKCWsQEnmuO4SHK9oALVkw/qE760LSNO/2UWZf/Ct2b0TtSlMgjSDKBILF53lp1I0loUvmQkWoNoFcdQ83UAIMC4QkMeQLKRUpQihaLVyhKxBBs3E0G7nQAWT7o4VnUs0s7OdYvqTIUQXOhaMjcaYhReS4SiHet3HCMPYeIqjqsiStsb2vrsAFYehtYciE6BccFyUUvjKwXNq1pxdSnxVRkhJuRSQZ5rpqRZ5ZEiTlCSOcZTDlkem+f34M1QXDTHbNLzoxf/CPY/dzR2334MWEM9Bhs/wthQZxaSp9TM15jJgAinsdEovbNN53mua/3AkGmEXcz0/QzQLqkXmPx1hqFSqnHP2mbTbbXTe4N57R7jnnhsAGBoawPcbRFHE8y00HMfhJz+5qmhQxwhKdVau2YnrlKjXSyT5OLEQuI7H4kX9hJ1Z2zzkDmEnpdVssWx1P7f/8Rek+TIA2tMvsQpNbZD+w6ThgRZKdyWDK2tsbbtopUnjiP7ufiant6FEFz193ZT8MiZqE3dCsjTFVQ6+6CXXEc0oo6uvThQ1ra1OLIjTWaq9y+hf1M+OnWOUu9okusTYtMQPlrH3wScxvOMXOO2MuCw56LBluIt66asrHnhgF+O7QsqBxJBSqSiUFgjlol1FlqWYJMUYgXQlWcml7CuMCC0ykxRpNwoSBK7rLqA+z4e883w+clGRplbgkCQJU5MNgqDE7EyVLIPAdYlTnzROOPLFr2XLpgZJ7JMYyKJpapVFTDRHkK0uZrMSW7dHeOUA31dkuhsjuoimXLJJQaw1jYmUMFKUgsXs2tYhcPrY9oyi2r8arwLNiW507qFNjhQKIxdwCoyJi2GEeh43fV7wAL7vkmV2Sm157cLGRxYTbzD4vs9HP/wRli9fRqfT4ZxzPk1vby//9V+fo1wqkeU5533lfBqtBtrYSVdPby/nnPVJyuUSz63fwPev/gHGGErLDdUkwVWSJBvH4NM32EMWN+nuqXDKW17G9dfdjaoKDjxwP9Y9FiKUwBiHFXutYvHipWwfmWZkJCXLe6h1DRF4JbY91sZTPSxZXWLbli1sHRZ0LxpkbOswE5umKdVKrNhjL5yW4snhFssH96a/x9CYbdJV7cJ3PbZtarJjpEzZrRPGHiZ30J0U6cQkUcDMqEM8LZio52QsoTVVZ2BpSNVLiKdiVEvgCx9TrlAKfNqtSSamZ6hX64TNNr70yZIqJo3AS9nw7AhpG6r1ELfmk+uQemkCHXosW1Jjy84J1g4uI2rHtDoxSWbha7spVYAgiUMwhjyXuGWHjAxHW6QDKbAbHVuW5ocIQtthhM4tlcB37Q0cxzoc5DJHOR6mUF7nOidLMouc5tYGCGMHMAibkmOMLIqRZNXKFTz00HMkYTcYUDgcesiRnHrqawFYunQpeVoF7fP0M5vJ0zpox0YQP39AUlTFq6/+H66++n+45JKL+MMfbqTVatHb18uKFUtZuXIZYRgu3DqKIlqtFuvWPQPAgw8+zEEHHTC/3JFnOV//+g+44oqfcuWV5/GHP9zO1NTM3zzm//345yHvzttoh4rJnTFSFCaglvwEgNKGuTHJZBKjXIWjXJI0I3YkjUlDGMfkGbg+OI5dkS3R24pnGo3dHMB28/nwpF7go1ihyfMMOBe4UxS8qEIPXXBULK3F/syqEgUhBmllm+Q5IA2TYz7/8w1lITchCojuhbvH7oGII9/0GH5J4bpllMgReUjgllGiF9fD2hEZ8JwcR+VkWYrjOygcOso2ZtJY098gs9CzznKkkKTONIFXo1pxmBye4Bc/+S7d3WB93RSq4hTNLszUKqx/+jl8lVLyfZQj0eVempMtWjMz5F5Ilru2UVMOadwEJ7IT1BAcJ8NxJP1de9JuCuJoB7PNBspzF4RMQVBmbm6OMLRTw6pfodGJSPMMhEOcRphcMT21je5mzmyzjHEyyqqf1twUjpMSCxB+Dybuw+/uUF+cIXv78KQCWSfKQubyBM9bjMhChKcp1zzcyNCOFULXUXkL10lI2iVSEZKamEw0wcnw3DLDI10vaCa1/jp/202K4j8LqNXzIFZLjrc/y7MiVk783V284NsX0I7/L5u3+duZ5wnr9N8P6ICT/+52/+jIMxgft/zYhfPaPP9yn7eGKFi3QhBGhk5UTEwEjIz9fXloNll4zZKAiYmgeK9Sxidynn56J1pvZ95XEAK+uXEzz8OJyPIqmPB5rzFZeHMGBwfwfR8w7No1SqVSpru7CykV09MzNBpN+np6ipQixcTEFD09dUZHxxee47vf/U6OOOIIarUqt9zyJ66//nqWLV3KV77yZVauXMGrX23fwyRJi5QtWDQwwH99/jOUSiXuvPNObrvt9uJF5ojqMMIztKVLd283o5s2sd+eBzEzuZN23CYlQboOff3dLFvhM/rcJt7w1lP4yZXX2teYH2Z5n1KgaSOxXKg80zz41AgIg+95jDUbDHZp1qw9jE1bt7CoZxEib5OnMXkq6K31EbY7JLHB8SSZVLh4ZE4JryQJlM9c2GByboRlq+v4qUN7BLxSzs13XEsSNjjvM5dz9BF1an29xKLGpq0VPvix7/D7a67iNW8MaMd1fnTFpUzPjROHhlbUIc0h1z6+Lwm8Eo4nIA9JJIhcozC4jkQblzRPUI4iSSN8ZZeM+VQtC5fZfz3PQyo7ubeKb0WzkRNHbyZLFwGCJAKNYdGiHv58i0OuNczbnBS1Pkrr6NxFSEWaDJDloJRBKkmWaZSQGOwin2d2rGTIbGSfMKRRjtghLTUoc0nD8sLmzapki7VBlhDOTJGWZieJuqhvQijSJCfXOYji9WYZGGNpR/Zy4y1vPoV777uXuy++227IA4+XHXsMO3bu5Jvf/g6vffVJ
vP71r+Wa667DcQKacy1efdJJ3Hf/g9x488189IwPsc9ee/Hs+vXUHJ80d8hVSFBdSacTgXGo1XzG5yZY//CeCLGSxoyh0yiRxhaKl0ry3AOHs05bhbRIM6ZnJE2pmHA9NJZXPrrLITMHkucwvcUjKCmiNGF8R8rwJse6feh9GEVYW5vc4LoSkxvSxHI8y2VbG9LU+iPmWtPcZdEAJQQTMyl5BuFoneFNkyxaux7Pixmfa6PSGuF0g1JQp79aYro1huMFlKseSSuk5Ho4riEOEz74n++mt9pPq9Xh6h/9CCcImIsSunskaQfKTjeuA40xQ6XUS9Lyn1fjJTq7nCzvLNRFtINE4LoecRqhc1MMBooQFcTuej3PO5AOeWowSHQuMLm1/MnSYrqZW3eanloVk2dk0loHIiCOrI+jMZXiDgUwztatT3P44cdwx+1/tY9l4D/+48186EOfxRjDtddeuXB7rY0VwRVw+wsr9xiuC0kSUqlUiKKQJGnzxBOPc8wxR3Pxxffyb//2bzz22KNAim2ec5544jH22Wcf7rjjVvbaaw927dpZ/B4cd5o01SQJRFGTJNmFdZ75/2JCqRy0NpbInOsF0r012zYIZUmk1ogbK5cveGyYHNebnwpZwYkueGx5JvB82z7aneH87teeCPMw5ALZ/vmN5XzTZ8zfrJKFREXMt55mYapk/7Unj+d5LBrsR8hCJIou4p3mG1ID2NfZmPTI55bRaQ+TxCG5jlAqxHEaOKKCMuAFLsqxqqzcURhjOVexjvEJILXTOK0MynXItUE6DnGeUXKXE0UJUdqiUiuxY9d6phsW8qhWu2nP7UDnrj2JSVi5Zhkzcw10lqOjHJlLvLKiVPdppxHKsWN+rTVlX4OpIaRNqfA9a1jczkfIXEHQZeGcNM3RAYUqLF9YNLIsw/EUrqdRbhdh2kBrSZrGjE1OsY//IhI9RVe5RhxGKB8UFaRxCVt99Ay4TEzNMTyeo3xJJ0kQYhLcHMfkxJG1WUjnOijl46sSWd5Ea/C9Mq3I4HuzNqkncgkcHy2bZHmHXM8nlxSbCcz8HucFx/xnP3/MwxsLX2uNIfuHzeT8fZ9wwit417vehtaaMIz46le/zrZtOxdutXbtan74w8s599wvcc89D7zgHkqlgAsv/G/2229vfvObG7n88que91w6/LOH49jzOs+tG4Ajd3MKTYHb2P2VKbiEGkcqpLIbNHvh7b4eoJgYLjS6emHyVNwS37eigrTIcRYFRmS/3g11ZtnfP99q1Zribt+++31qNls0Gk2EEKxcuZxGw9pSpGnG6Og4xpgXNJMAP/7xT7n22t8gpeC7372SG2+8kfHxcT70oTO49NKLWbVqOc1ma0F5DHDaO/6Dn/3852zZvJmzzz6bRx99lJGREZSU9NW7cNyckq9QoozIYyYmd1Kr1ck6TUolD6NhfNskPXvvyR4HLWfrM+O85tQ3A3DPn/qYmpgCkVNWklAqjDZIJeg0HYKujGo1YGIWXC9n0+bNpKbFjglD2MnQrkuShrTjiGarhfIkcRbhOgHt5hTCkUgvoDk3h19eRKXsk7YUbTOFERlxR2GqOYtXLSFvtrjzkZ1UeiuEJU2Pu4Y/3f4Ml33nbj7woQ/z3NhzDKzaD3+8mzPf+3ZyJ+TZjRuYHG2zbv3DPP7kOO1OgPIyqoEg0z7aBMRZjCG0tKIsw1fzlCP1gs9GF7C4lJBlSRHdmhfWQKtJ034E1l8PoKu7hkHQiWJALdTZXBsEmiwXKCkh1yRxitYCJ/DIkwwhTVEv1ELZV9IOA7JU4zoC5UOeaZRRzF/MQoiFYYQxhrPO+iBr16xC65gvnPcFDj/scE4++WSqlQrX/fZ3/OH663nPu97F0OIheru7ufK73+OUN72JCy68CJPv5kgedcQR9Pb08K9vfRt3/PnP3PqXO9m5c4Q91+yJ67j09PbQCa2tmMFygZcsHuKWW29HCMGGjZvYb//9efrZ9YzPtJE6J+8YPLeJcHJaSpL4FdJ2D1PDOcL1yXSbMGwiRE8RTGTdDzAStKZUDuz71E6IWylW+aFJ4xShLN0nyRJMagVBnueDFkhtKSlS2imb6yiE0AiFzUF3LLUsz1IcxyE3mf19kRiWmRzlKIQ0KOnQblSo5j343SF7HdHN6KYc4RmGh4dxHJfBJcsZH5nCUx49QRU3dWnkJWqVbo5++Zu5+69PEVSHSFGkcYtqeREyLWGEYWAJTE2N4+s1LOoJCFOJxEOQY3SK1jH1Lhff80mylHYUEjge1YqLzjqkJrd8WA2IebGg3exIrMOE8gOMcYhDh3e+820cccRB+L7PzTf9mWuuuXHh/H/jm07l2Fccxfs/8CmE8K0ziBS89jWv5F3v+hcmJqaZmJjmv//7M9x11z0cddSxfPe7XyPLcj772Uv585/v58orv8pzz20uHG0c+5lhewhj4NxzP8AFF3zvBdfdJz5xJqtWraRcLnP33ffQ09PDfffdx4knvoof/OB/2LhxI+vXrwfgjDPO4OKLL+Gee+7lmGOO4aqrrmJiYoL/+q//Xri/T37yE+y1116A4IYbbiQMI170orWceeY/r/L+pxvKNM0XVG1CSFzPNnpiHluah0CMwfG9hQXHkYI0S3F8cF3IUjvUlBr8EsWOT+B6Vm0tFprJ4jAL/wHMC3KmLRF6nug/Xzh4wRTT/p4XNAnGCDCWDD0zYw3ABfYicjxBmmtAoU3OPKlfScnT93XYa/8uhDvGssU9VCoryLIMZIhR0hKllS2QWZZR8qxRq0OJTIW2EGcgpI3v04V6Nc9zMs8gRIBJfVynApQIAoPvB2jtUqrXaM02cF2FclyarRCpSuhc4zqgc0UYJcQRZLqE55fIsowkTHC8HFclJJFGGt+686uyTYPxXJK0xMTkNI5jPShd1yVJoiLhwi4WUdomR+IrhSMkvgpw/Sl27GjSCCuQpnSaLlke4ihJknskbsYzj05iugTTWwVdvqRncRuVV8hNAlphjCA1giLJkCxPSI1GmBxHlYlzTZpndCKJmxl04iIjRaXaj45DTBw8rzMEbcaRu6mSz/vMX3CavPC0mr+BseeUNv9wjMi9997Grbf+AYBjjnkpZ5xxGuec86mF359++jt48MEHgA7QeMHfZlnI1Vf/hD32WMWyZYtf0Ny67nwHPD8d3w1vaGPI0uc9HyFIkxQwBEGwEOP1wh1VMeHXOXkUIZWL46j5bRbm+V8Vb8T8QpvndnGYf3N0nhFHifWGc5wFZ4L5t0w50p4nRaP6t/PPj3/8o6xcuYokSfjsZz/PUUcdySmnvJFarYsbbriBBx98gPe97z2sWrWKWq3Gt771Hd785jdxwQUX/c37l9HVVWVycpqRkZEFl4c4jsnzjK1bd9DVVaVardBs2mt62bKlTE1OMjM7y6ZNm9h7771Z98wz+L7D0NJ+oo4ky+aYbc7SW+tndGyamU5ErkMykduy7le444FH2He/NVDtojFt3STyTNLb38305DR2h6wRUliUIp0h0YodI+OUVAWkT/9SMGkPzYYVYU1OzlD2JWn
cxMHBdxQGn1KXR3OuBYlDHEVonVH3+smcGUYnh9F47LF6FRvWb2FqRLB4UZuB5YsY2gOaSU4niTn8CI9f/fa3eCriuUf/zO33PMraRUOEdLjtka00OhF9PcvY96Ahjj5+Xx5/cpRnn93Jpk0Psn1rh6BiBTcOJaI4RCibXkUe2Lg+ZeNZ57O7LUdbLoif0jTF81yMkcQhBf3AsGjRIhzPI9eaJEoRToA2OTrLMNItpupWvayNoeyX6KrVCh64RY2M1mRBRpppUm2DH3KN5dgriS52iko5CKGRRixc6/NrwsuOOQKjDR86/TNIZxYpNLf9+c/c8qc/0dvXx0VfO5/f/+EPCCmYmJjgK1/5KhjD+Rde+IIiYgQMDA7y+z/8gauuvpqvX3YZDzz8GNu3b2fVyhX88KrvghB89MyziGM70ZNKsmnzZg46YD+eWbeOQw85mOGREZSEFf1lpCMIfInrulRLFQKvRLVapd0qMbIxJY4z4tS3XFZMkW4iilhYqyKPoxglFdVqF67j47o+ruda2zlpE+GEUOSkZLmFaAQZSkgyYydjVohqnT2EUkWt0BitUU5GHIV2/dKCLCmaMyHxShKyHG1SlIKx4YjhkTkO3r+fPVdqnrwnolytEYk2UzPTdNdLBF5Ku9FkYtRD6RiVlPjmxd9irpMiVYc8atNVr7Jk2VIcYfD8WZyemGeeidl/2RAD1T62bd8I5LY/FBIlFUHgECcZaEFvrUqepERhB53nu1FNIRdgb0FR/3NDuVQmy2tkiUHngh//7+/5wf9ch3IkP/nxJVx77R/JczuxXb1qhT3vE0ufAKyKG8kvf3kD11xz8/MrGRdd9JUX1LYf/nADP/zhFS/42VVXXf6C7y+44Mu88Mi48MIL+EfH5z73ub/72Re/+MWFr7/2ta/9w7+74srv0VWr0mp1mJmZY3Cwn+3bd/GBD5zO+9//vn/4N397/NMN5R6rViGkotG4kKDkQBEZpDFFtqtVZmaZJk0yssyqI7VJMXmbtJWT53aUrrUhS62XpFIWBs+yeRsRicXs7MkKFJwsO5GxcJaAQulWVArbRM5z5OZZtkUJ6aotQypI0hQlNOASx4Kjjl1Dz2JDrafMH3+7CULB2z6wJw/fP8Vfb9zFqrV7Mjx2EOSGZpiTtjaydp8epOhilpAwTXEyReppvMxgUsila3fDwsZV5toghEJmik6Y43klkiRHCUGWWREMaGummkVIBzqtNsKdpbd3DVmWMDE5ArmmXKmwbOlSJicnyLIUT2nSNEVLB6NyZOrTaeXIMjRm2+A4GCmRiUPHpKii6HcaEjeg8AacwKQr6bQTyuUySRKRFQbPrusTRQkgMXmAIxOSaA7f60aLJuV6jQ1PTRFNNpBBSBKnSCckCV2MaVoukAQTVvBLGZnQpGlOOx3F8+qYLKeVJEjhIknR5BiTolxrQq9UB6EdlHIxaNphVkxNU2YTgecFdIy/m5M3P6gWhpNfezL/8ta3gjE8+OCDfPs73+aNb3wTrzv5dbiuw5YtWzjvK+eR55rLv3k5GzZs4IADDuSaa67lhhtuBIrEj+KOPc8jjjuAXWyr1TJCgFJW8fqa17yGhx56kDVr1qCUFSCAbQ6TJCFNIx5//B6WLq0DvcDs866u3y98pXXBs10wpLUowPyRZ5ayIYQk1xKT/98ZLo5r0AaSdPet+hatYH5RtBYwdpEUwm4cPfco5tXURxzuMzn2JEmoGVi9BtPu8PiTD9I1OMTY6Cx9i0oc/tI1bNva5LF7PNpN2xxr43HMS19KlmV8/vOfZ3x8AgT85c6/8Oyz6xifmOTKK77DAw/cj1KKqalJLrjwIjqdDhddcinyBVMwy+19z3vew1FHHcmf/vQnenrqhKEVJIAoasRuGyCALVu2ctDBBxPHMUcccQR33303tVqNMEpZt2kHMvHReRMtNJ5XwnVCdCiYCTPGAUcoBro1q/trjG3ZwuD+R/LIY88C0ApfRL1aprvezWxzDom2CnehKFVLJOEk9Xod4cC2DQ266hnVoJexsTFc10FJcP0KQbWL0InoxB08X9IJHbT2cIUhatr42qg5TTgZoYKIwd4h1j++kVJPBTJozc4gtEuzk9KebrBUGpL9XI455gBunNpF/5JlLF+9nYrbxm24zI7vYnxqB7f9YReu6/HiQ1/KxMw4e71oiL1efTL33vEUc805dg1PkuQNSlXI0iIdyEkQxqB1hnQlJssL1MoDkxeUJQdETKzt1MxxDHEMnuuRaE1j+gIrbJECIg1aFH6eRSyslgTlAMjIcocsVuS5JkszpFDIYkIvlcCVILVV16Mjex+ZKBosXWzaFOgyxcWIMRM8ve4+HnvsLoxukadW/e17Ppd/69tgDMuWLsVxHDDw9NPrENpgpCgmd8V6U8ClrVaLx556knKti2fXr2fNmlX09/Sw7pln+cJ553Hkiw/nX9/2L1z1gx+BEQgUv7/hD5x95llcfOH5jI9P0GjM4Xou5Z5eK9ZsJ0DA2FxGnjfIsimmxyVxstIOObSDMN/AaK9A0wDdQAiNzjTK8ajUeglKVeZJW5mx6EaOodWOidLcNurS0gxqlRISaLdn6LRDjJCW/6wLwZyaD+UTkIPvK/K0QyfuIB1R2HtBFimUOgMhJVlkGOqrYUoJ4YRgzkwjyy5eSeKYbgwp1VKJ6emEwK1y2rtO4Zaf/46sy+NlL38Jd951D9rvY0V/P5HQzAw/xNDQvmROwKYNuzBRiVq5QioExgQFbSknCAJSo4g6KR/80OmsXbsGgK9+9csceeThHPvKV1EpV/jVr37NLbfcxHve/V4WL15Cd3c33/v+d3njG07h69+8ijxXGGGnsllu0SvXda0Cuhhqve1tJ3PNNTdx5pnvtacYxXyjKEOnnnoSJ5zwUq655mZuvfW6/0ul/v/vcBxnwcYrz/MF94V5FEEIYW0RtUVu5hGm+eGR3QBam8ZqtUKj0d7tTOC5RFH8Dx71//J8/ulbyjKZNkhpuVCOA2mmyXIIo5g4TkjjEKOzAta2Clglc4RKUV6+8CZbPy6D69npjEFTrsGL9jyUr3zlAkZHRgC46eab+NWvfsnatWs566xzKJVKpGnGt771TR555FE7PSveTAEcf8IJnHbaOxEItmzZzHnnfYkkTfCDcaIoJgggjDKWLtubVLc5/pQeYh0yPeUQh9OUgxhRWsHRJxzAXX+8i8Nf+VLuurWORGAmmyxetIJ6t2Dj5jFUrUqa5Lgk+GmNRMXkQuAIa0ruFCMotzBvzjNDIB3SOMGVCqM1vnHI4hzXdelEEb7vU3IEiQsmabFrePMC6U5J68+3c9cuRLHAWpip4DKZCEmApxzSPCcLNY6v8VyJTDNy1/KV0izEVVZJTZ4QOF1MDU8Blp+EhiAo0263UdIliTuUS1YhncQQlFy0zmxElQzoNGfYumma/Y5YwUwWYVBID0yukZTwVUCp5rJrchrPreJ5AWmmbZSjshNs35MkqYOnfHThyyk9ayZtpMD3ffKkjREuSglMccFkWYbzD4aJe+yxB29969s448On02
61qdVqCAS3334bv/vd7wDDhz/8EY477nj+9Kc/AlZB/O73vAeA93/g/Tz7zDruuutu+vv7mJmdXWhUXvOa1/Ce97wH3/c5/fTTUUpRKpV4wxvewIc//GE+85nPkOf5QiqT53kvgJDn/12+fDkzMzO0WpajMu+L6DouA4OLForE5OQUjUaHvr5u+vv7+NznPovr2k3L5NQU519wAaMjo/Z+iwd43/vewztPO41PffrTbN++nU67w+TU5MKD77vvGt592n8QlHyMgQcfepgf/eQnGCPIc4GQSZEnK9jvZXvx0mNeT0XWydwp+vwKSZ7z9JZ13HfTX7j/rns4ZJ+jyBOfpx2JEPbvTA6rVq/iwYceQkrJ8uVLMRjWrlnLv/7rv+I4DsuWLSv892D9ho12I1bwYQYG+hmbmEBg/SmN0Zx/wQX4vsd3vv1tfve73xMnEX19fQSBz7LlS2k1m7TbbcAKdH70ox9z7rlnccwxx7Bz504mJiaYnZ1DuBWeenAUoSMc3yNKNDXlMdseo941iNQ5lYqk2tWmt+ywZsUqlO6nVoo5/JQTAPjpz9uMTkxQ7+5m8cBejG4ySGm5WDoTlH0XaUBpRd0ziJZmdnIX5UoF5SrCJKQZRvT1dhHPtBGZtM4TpRTXsXC4cAXSgygO8VwXR2rGZyYQOqfLE+Q6wjEeeZhy0L4DzE4Z3nziuwnzlFtuvYPWnKbZbiNw6XQgTyscfMghLFv+Olphg9GZeygFHovqb2R2CoTT5vSP7k3Y8ZkLJxgZX88PvnsbjmsjFTMNStgNiDSSVBdZ1wbAQgxpEuE4LkYIUp2hjZ3cSiVpt9vkWRvl2qbQDxRJGJPmkXUCMDaVx/etQjaMIjAKxxVIcrIowg8qCKlxHUUaZ5z18Y+xcsUK4jjmwYce4ue//CmHHnoQ7/i30wDBzp3DfPnLVy5M0AWGk048keOOeyVpmrJq1Sq+dv7X2LFjB5/9/BeYa85w3S9/RVLQO4yxzaTd5Kli0GFtqaRUPPHkE+y3z348/eyz7LF6NTfedBN93b202m3SLGeu1aRaqWAFYRD4HnEccvEll+F7Ph/9yOnce+/9xGHM5qe3I2RGqeQzNjYGhf2MFIIkrtNu9qI8F89zCVJFJysuaAGSnFzHlLp6qHcvJc+tfR3Cpkrp3JBpsLSu3HokOZmtsXnG+9/z76xesZwo7PDQw49w7XW/47AXH8y/ve2taG0YGR3lkm9+y06QdU6rE/LWfzmVl7/kSLIsY9Wq1Vxw0fncc+/9dpObCxyhSLOUcKSFWCRxSin9a2vs2NxCyA4SRbwjQ8cwsGwPlvatIk1zfFXm1nvvIo5aOJmD21Viv71X87IT30m0pc19Tz6AO1BnpOHSnp6lVE7o7etibFTiOJIszch1ypFHH4njSs791CfIc5vv/Zc7b+O3v7sez/X59re/zS23WOh6bGyMr3zlPEBwycUXYWQFrQM7aSxmWJ/4+Lv4wx9+wdfO/xLGzCCl5JZbrmVkZJwvfnELMMbzqY433fRrbrjhl0X9WwJkxZ39A27Q/+vDFvvseTyjecu/+f/Po41a5y/YaM8jYGmaIqVaCCloNtrFJsy+gDhOqFRKBQz/zx3/dEPZDtuWHJrHOI6HkYrG9CzN2QbC5Hi+Sy1wEa4HQlrBC4YsTVFS43iQxZDEEpHu9p5LU43nQaVmjbcffughPvf5zxSqO3sxRVHIl770RYZHhlm5chUXXnAhb/mXtywswvMwxEc+/BHe+c53MNdo8IUvfIlXvOKV3HrbH8m1hdzjJGW/Aw9k771ejCiNkod96ETy25/cxoc+fgJPPbqVLY8aHDHLO971Fu67dys6PRAcTU9fF889t5NXxMez/779NOeg4ncx3h4hjjzIGygBSrggcySaBUfDDITrWH6hUtZQPIcoj3HdgDjT+MpBZII0Akc6eIGPFImNXZKutbBQijRN8QMPoaTdWRScUyHKGO2gNThC0t/bQ0pGmscEXoDKNY25iMCvE3iK3AT4bp2Vy+tced0t9Pb2kucprWYTpXy01iRpVOxwMoKSJM+tDUKWJSjpobUmKEvuvPUx9j18JWmaYqQD0sF1EjpRTJ46dsBmfKs+nm2SZAkITaVUIk8NOs4QMkE6HiYX1ppHWL/RTtgii0IkdleVZjYOMokSy/XMXkh4NMChhxzKfffdR73eTb2rzujYKNVajaOPOpq3vvWt+IFPuVRGSsnjjz+O5/ncfdfdDA0OMjY2zve/f9U8ZkF/fz/NZnPhwr3pppu46aabOPbYY3n3u9/DBRecz3vf+15+/OMfv4C/B7yAnvGPjt0Npi36QhiGFg8yPj5OFNmmzn62MDU1y/T0HB/60BlEccSqlSs4+uij+fAZZ3DuOecu3Ne+++zDAfvvz9jYGLt27mTrli2sWrWKoOUXzatgZOcOPv3pzzA8MozveXzjG1/niEMO4cYbb8IYFxjGcR36BwbY+ORGfvj9C+ktV2g7PitWvIgly1fwon1W8/r/+CAnnXIKP/vupdx31xaUejtCqgUj8q1btnDEEUdw+223z4MKfPlLX+ZjH/84Rht+9vOfsW3HTowxtFstwrCzwE6ZmJgsJur2B67nkaUZcZzQarWZnJoqfEpnieOEnbtGCTvthffBcjAFn/ns51BSct55X+LWW29DAGVP8fKXLKPc7dJTG6LqVlC9gnrVpxRU6EQxvV2LKYkuYtMmESkYF6OqPLduZP6Sxvc9oijksCMOZGp7iyxLLYdMe6RRhklCvMDlsJe8iD/f8Si1njqddpOKG+DmOXk7ppNO4BiQQWCFbnEO5LbBQhBFCX7QhVGQ5AZpQhzpMTU8jV/yUbJMqOGEN72Eqdkt4E+w7akxOu0QP9BMz0wyNTXDin4P6NCu72Sn3EJWEqigQiYrDOtNuIslhoj1WYLxSuAn7Lf/Ghbd/DjjmyepVkskaQehgoJcK9C53RDa0ASLG3qeh84S4iS2SUR+QBQJ4iTGK1es5ZtOyLOcyGgqtYB2sw1G4ZcCHMfGW7bbbWv6bUmCeK4gTzLyLMb1PPJM47gOUgouvOwSNm3YiuN6BOUaGzZs4eMf+xi5Tjn33M9zwP578/gTzyxE0/3857/k9ttvx/c9Lr74Ih599FFKpRKXXHIJm7dsptPpUA6CF9gBuVLxyU+eyUWXXYaU4LsBQkiuv+kWzvzYRzmtVOLRxx9n6+atTE9O8/nPf5aXHHUUQgjOO+98pIZPfuIjXH75t1izejUfPv0MhBD88dbbGCmUck5Z43guyoXeJTWqpTqCgJIX0J7zmZkoo5S1oPMdl0jOhwVYmkrv4DL8oE4YhXaKK+RC8zsvoHUE6ML7VWdWEFvyXXxXctk3LmP7jmEwlgLzyGOPcf+DD2EywzlnfYJ99tqTp9Y9jVIOpZLkV7/8NdffcDN53ubKy7/DX++/D20E2qS4ypCnmixROJFPey6iFHYjWjm1DNzAJ0kVRkDqZvQv6+axjY8xF81SmxWYAZ9Br4v1Tz+B2yNpzxruut9laX+ZSvdqauyB702jVIBOPCYnR
tBCYLCv1fc9XrTP3mzY8CxdXTWLbOaaI1/yUk44/jUALF26zPYQQvDMM8/sJtkXLYUQNuUmN2DMKJd9/QIwmfUTFdbCbHR0GDAMD+9iXtxCkbg0X/q1hl27dlgqR7GxmZ8snnbaaRx22GEEQcAtt9zCddddhzGGc845hxUrVvDcc89x+eWXL3CXsyzjoIMO4txzz6Ver3PyyScXp6jgq1/9Kueccy6OY6eTfX19fPGLX+L00z9kaQzFE9pjjz14wxvewKWXXsZb3nIqcZxw4403kqRdHH/cyyiXA6655ma0rvHpT7+XSqXyf1zD/vb4pxvKvnoVR0C7U6IVpRx24P689VNvQEjB408+ydU/+F9edcIrOfFVr8JVDlu3b+OrF1yMQnP++d9m2/b1vOhFB/GH66/l5luut/F+wpAXSts81ziu5YOJYvWxwgLYsWMXlj+p2LplC6WiGdBFF44pxszCTrOkkJSCgKmpieI+BFkmacxC0klZ/9RWtAm5/7YpKqWAXqfEE49OMrw9JQ8bzE3uRMsGwyM9SDFDUC5Z7olQ3PLbdaxetZwWITqfRhGRd4ZZu08vwlG4UqGltFCcLNTlyp6l3T01kijGmAwhJUHgkWMIXAelPZIkw5gcV7ro3KBzaylk9O7mxNqZWKrAvKLXxlQ6NNtt0iSi5AmSTpskSxCuIZeGPM/wlSDPEmZbMyBcyqUe1m8aZfPWKRb11ui023ie9ah03SJhSBpynZJmIKVHpx1RrkriPMN1PerdZUZ3jPHkA1s48qUv4pkN6+jpHiKPU3zpUPID0ihGYnAQ1Bwf4yg67Zi8k+NKhZIOqbZE+jw3mDxc4JdKYSj5JfIoQxnQWiAyEBl4ykX9DeArwJLMgeFdu5gHalqtJu9733s559xzyDPN4UcczupVqy3PNIlpNpuMjY0v3IcBhoYGAViyZAkAu3btYmBgAIDNmzfzmc98hvPPNxx66KEce+yxANTrdY455hi++MUv8uCDD6KUYvHixQsT5XK5/ILnCnDcK1/JaaedxsUXX8To6JidjmBIM2sVNL+51EYTRiGVcok0TYtzQzNvd+L7Pp/85Jl84xvf4MtftpybeW7bfEMsBGzYuB6kRQqSLGbjpg0sXb4EN5DkGQUHOCaOOiweHKRUTlBViT/X5s5rvkPXQIlrmhrXXcrRb3otb3nPxzjkpGku/eyWwtzZXsP33vtXXvKSl/Dd715JlmV84Qtf4M477+Sb3/gG69evp9VsWnW0sNeHklYwcOaZn+Ciiy8pJkT2PP/oRz7CiuXLcRyHm2++mXarRaVS4StfOY81a/bgwvO/wk9/+jPuvfc+zj33bC644CKOOupITjvt3zHG8POf/9w2OY5Drd7FngcexmynRTPKGZmaQO9QdKIYZTQrl/Ry86Z72LRrlHq1h4ryGG1OEWiHOLXFdWriEAvLmxy5eYYu1cNkbCk+cRjheBLla6QrUG5M1omIpCIJJUkYIR2BXyrTMSmB7yNETtX3iBNtP/+iHlZLJZJQI/LU+sR2BSQJ1LtXMDc7zFw4xeLFq5kciQgzh2Z7K8vXruTRh59l7dqlNJtj9HT5DC2tsmldShi30Z2QNAmodfUzO9tGqBnLHxMuWdbCL3fotA3jzhx7rulnenMbqbJiszdPTZo3hZ/3XrRCTGFs1rrr+4hckeUWkhZC2ZouFLgOnzzj4+yxejUI+OqF53H4IUdxwvHHU61W+f0NN/DHP97MO/7tPxgYHKCrq4sf//RnnPSqE/j6N79FJSgzO9ewxt5a88mPfpwkibnyqu/x7LrNlCsl6n39pJElcI7MC7yep8QbG5tgn31exPr1G+h0OrRbbd73/v/ECwJKnk/JL/O/P/oJSIGjHHSec9nXv4GSCtdxUI4iimJmZub47H9/eTfFyRiazRZnnf0p0iTCdaw1nOf5XHzxZQgB27bv4Myzz0I5btG0KAyGam8XMle2cceiVu1WkyxJac4oGk0PkWtSAxWVgHE5+5wPsWbNSoQ0XPKd77PfPi/itSe8glJQ4jc33MStd/yFd7z9XxgcWES93sX//OyXvO6E4/j6Fd/D9zzL13TsWvqxMz5ClMT86Cc/ZcPGjaRZiusptNQICVMz03aCrDykCgi8hNmZJgcdfCA7d+2iHFTIEk27o5BIstjQHtdUFgmkqCCDlCg2dC2tMDPWxnPK9HSnxLFheP1m+vsWUfYrSK3RUcIMGzjlX/dkWjfpUjE/+sUveeXJx9HdLZkZaVOvwa4tM3R1dREEZRQOuTBQmJVv3bqFgw46lL/eezfSEWRG8+ZT3soZHz6dPM/5+c9+WWyOwFjjMoywgjJpxMJQy3rpihdM+ezpZIccz/86yyw0buZPt3lU9nlRpVJKenp6KJVK3H777Vx77bU0Gg1++tOfcv/993PkkUcyOTnFBRdcwDnnnMPee+/NU089BVhkK4oizjzzzAUu5MDAAPV6nU2bNuE4inK5TL1e56STTuL+++8rnu08xx127tzJ6tWr+fWvf4Uxhne9690IARdd/Bl6u7uRUrDumY1s3fEcn/3cfyNEFx/96Af5Z45/uqFsdGyzMzEyxeJFdd7+tjfx+f/+IpMzlv/R6rS47Y47+N2NfwAj+M/3vp9XvuLl3PmXWxHYNIkzz3k3WsN73vUBNmx6hgcfvguRzxNiBaA5+OBD+cEPfsTo6CiXX/5NhoeH52kr5HnOK15xLM8+++yCDZCNWbMf6Ncvu4z//d+fkiQJDz38EI8+9gjzEVlZ1uH9Z7yE31/7HHvv43Hwi5cyOjFOT71OOwz58433s2xFL888PY6SDmv3WUSp7LNtS0bUbgKG/Q+KWffArcixblTNpdmZ4nWnHA/lfuLMZrNqk1ulmLJZ2sqxOwNpHPIwxiQxSrk2LN5olDHoLCX3XVAZRiYIx8dIRW4kCoFSVsE4r7yG3RZFQlqI0GiJEh6uDAg8hywx+F7Z2hYlhtlokv7uCo4LUvZiVERvvY+ffv9B9EyTuOQjhEKhyItc5HnLIGMMcShx3BylfAQCrTukxuAoqPf08rsf30lv2efFh+3Hrh27yHROUkD6quxgspCq089AuYojJImnyXRCLmKy3OCJbrTOQBoLseUZWhcNeQLaKRVCJ488zen26hhjKKt5j8jdl83TTz/NWWedxY9//L+02m26ajWrdi9X8Fwf4xhOOOEENm3cRBzHxXk0zyXcTd4fHR2ju7ubbdu2IYRg+fLlxLH15xwcHGR4eJg0Tfna177G+vXr0Vrz+c9/nmeffZYHH3wQKSVLlixhcnKSRqMBQrD//vvvnnwAvb29nHrqqXzyk5+0XDTgRz/6ERdeeCEjI6PMzs4+75XZZu+iiy5i+fLlTE9P8/GPf3zhvj7ykY/wm9/8hmeefbaAWZYjpWR6ZoY4SazgbZ7vXOye+/p6OfYVx3LOZ87BoNFaLjxe2Gmj05ijDj+CP939IK88+eX0r+pn7dpBUA22PLyV2370LW79yZUc/cp3ouR+CBHhOAqBCzh8/RvfpNjrAfDTn/2Mn/z0ZyzEISrF
//74x8VUxIpavnb+BQsc6HlKywUXXLgwOZnnTDYac3zsYx9DShtuMF/u5wU99913P/c/cD9FhShuY5gcn+GXP76FshNQcqC7KwDjsu+eKxga7KXet5hSXuOQJSHbNm9k6YolDPYdhc4yxubs877hDm8Bxqx1KVzPY5HnI4GXH3wipWCOTjJH3+Ai2u0p3vqy1Qxv20kUa+aaTdpJQnsuJxPQSIu0FKnAs8IN1ykRxS3StsL3qqRpkyDwaA6nKE8w2/Z50b6HoEWDen0ROzdGuKVeOmkb023wTMBzj0fMNic54oi9SUNJFHfRHOslaeQYU6IxHBEEvWCywmtR48g6kVEIBbWBjAP3c/nz9c/S5UlI7IZ/3mZFCIMVY2cIowCNxnqKRlFCxS0V3HcDwlhfYJlz9BFHoXPNOZ/+FOWuGr4b8MBDD3HTH29nqK+H877yZW677Q6MgbHxCb52yaUoIXn6medI45TZ2TkrIDOGK793Fc1mi1UrlnP2Jz/ORz7+SeIw5ITjXsO/vOVURkZGmJttLiBZ8+cPwPHHv5Lbbr/dijOkoFTyiRLbxLmOU9jQGRwlccslsjQnS1N0nqPzHKOhOTuH61kamNE5iJyo08H1faq1mt0QChsf6nhWGGeMsXZuaVY0PzmIjJ0bniaajPFLAe2oQxi2KVdL+J6i0ygh2A8ciWMMqdYcc8wRaG04/UOfx68a0rjN6I6d3Hj9DShHcuU3LuG3v/ktSRiya/tOvvSjnyAdyebnNlPxHVxPkWaaqNXi29/+NjOzTZYtXcpnPnUO7//g6ZTKZU486STe/KY3Mjw8yszkNDo1dHQCKsdTHvWeOi9/2THc+9f78StddHeXKbX6CsqUwNclZiYE9XKTVjsjCLpoz2YMLqmThLDXQXvx1NMbOOmkf6erJ+He++9hxcoX4Zo6h75ykDX7ebjjAUNdZQ7YN2bztnFe94q9WVoJmYtmiaKQaGoMUktHkIU/pFSKhx65n4MPOYwvffkCsizjgksv5O677+Gb3/iW3dC2Wgu1c0GsaDRnn3suF17wTYzJLEfVCAYH+/E8l9HREbuJ+eQnWbt2LXme89nPfpZjjjmG1772tVQqFX79619z/fXX8/4PfIDFixfT09PDlVdeyRvf+CYuuOB8hBBMT08vNJirV68mjhOGh4cZHh5mjz324Kabbi5q2H0ceOCBPPXUU/T19TE1NcWOHTtYtmzZQg2dmppibm6Oo49+Cddeex2tVpNdw8McfPDBXH311SjHwRRccyHhda97HU8/vY4LL7yQww47jHe/+118+9vf4dzPf5njXnospVLAs89t5OXHHsRbTnkjZmFN+H8+/umGstnuMD0+jUOb4044mbvv+SvbdoyASVGeC1KwevUevOedp1GtVKhUKsRxwp1YlfVfH7iDJIY8hx/95HvoXJImVunteJDnhufWP8fb//WttNstXnbMy/jCF77EBz5oeW3CCJYsW8qHPnQ6nzzrTOZtgeYnlI5SvP4Nb+Cd73wHk5MTfO5z/8WJJ76aW265hZnpNi95xX7ss9++XPvzEUZGI07Zd0/WuKtpthO80mKOfdNRbHlqjrG5mylXfE55+4m00w4//U4PxmRoDC978xHoP7dYsmiQF79uX6ZmZglb0O5Yv0eEwHWtOhMt0GlqVdnCIVcZOs9wfIjSCIlPJ4kIXIdSyaERtzA2NwBHS/ySixJ2VyOVbaxc10VKSblcJooi4sxCwTYpro1SkmazifYCq95zDa6jCTxFtWcZrXaETBVTYyFT06PUqw12DM/Q1VcjakW20FXdQmBl/em0Jd7gOgFJ1LQ54rlNjsjSDEyO4wRIX/KTn97M3OzJLF3SzbI1vYxNNwlFguc4pCLH+IpWFqLjFMfxSUlJ8tgq/5XlaSoHXAypnvfG8uxUNjEYZRMMMp0T69hO3orOaLdyGTZu3MCNN97Ad75zBXme8+CDD/CHP1zP1VdfxXnnnUen01nY8WnrF8W8dynAB97/Pp55Zh133323fe2FmfNJJ53E8ccfT5ZlzM7O8sUvfWl3QRK7M6PB7kIHBgY455xz+M///E8AfvmLX9Dd04OjFCeeeGIhVhnnrLPOIksz+hf1EwQB73znu9BaMzAwQHd3N6OjkwvNkqMUF198MZs3b+Yd73gH73rXu7jooos44ogjGBoa4pLLLmVR/yK01uzYsYOtW7eyfPlyOu0OnbDDvLcqGMqVChdddBG/+NUvWf/ceqwVoLUAE8WUfWx6ioGVq1i2ajtzc032OPilzLUi9h6qs/Y/Xs4Rb0256/d/5N4bfoOUvQh6rHEwgiXL1lAqOWghyXJNlmvmw3LiNLECpDxHCI3JcqTIcZXBcT0QduLVaMbkOkVgihxlgclzHGWvsyQpY4xbvD8aeD6JvGQFcUVjIApC+pLFn0Q5Lp5ykQJaTYl0FQ89LdFP5cSdBopuMF102jXWbRaFPYpHmNpHmmoWcgcBf15XYa69+7O//6k1lH3rEmHWYTnBRb4zriHohQDo0VY1m2YpaZaTpAlJmhIlMWE7oVTxkVLRmu7Q21sjDVMqjn2cqJ0yvr0X5cLMLsGmJywXdX4s4jp7MjExR6l8GE88GDA7E9JoDHL7b/pwPYp0S+uQIZ63kbKwsK2tTy0RnHbaOLW6Rxwb3JJBR3biqPVuGzdrG2S/j/MYTzl4jgNaFYpageu49PYvIkqqrF2zB5u2bKbe10eWajrtmIMP3J9T3/QmlFIMLBoohGwpGzZuoOQqtBb4viBXAsd1ChW3QOuErqrP9OQkAkFXtUKa5fzxttu55Y93cMZ/foBjjz2a226/1xpT6+L1AUcddRRXXXWVnSIZQztMQOeFaEqQ5jkmh1QbstwUTbch19JC845TxPFl5HmGo6zFi4WEKyAVaaptdrMxNs5T5wuogkVbC94xgizMaHXGibOAwcXL6Fu0lh27tqO1pq9/iMakIssSfOUSm5yVq5byyMNPWoumdgPlZhxx8It5y1v+BSEkS5cspuY7+J5g06b11KsCKRxS3SRsS+JIITC4nrWXi9ttxsaGEVLQ01Mj14Ibb7yFG2+8mQ+f/p+85Ogj+cuddyGktcNLsoQs0hx68MH88Ic/I4oimjqmUhKUfI8ojkhc6K246DSl2RSkaZNarUb/ol6Gd4zx8P3PEgTdPPrAQ2zdthm/u8pcY5y50THe8O+vo9kaR2mFqghWLE+59eFR1m1dwWB/HR07bNl8h3V7CbrJspR5hxetFbkO+M53ryaJItI0o6vezS233MzvfncD09MTGCzC+cMf/k8x1LJONRddeAlCegV/cn64MAHA4GAvL3nJS6hUKpxxxhlUKhV6enpYv349d955J3Ecc/XVV/Pwww9TrVYZH5/gu9/9LqOjYzz7rFVlz/v5zttt/fu//ztHHXUU11xzDUmSUKlUFrjg7Xaber0OWI7kvEgmiqKF4dJ8gmAcR4RhSKcT0tvbg+M4TExMWE5pgeba01CTpknhAdxgr732LtY5yU033cH8hv2RRx7lz7f+BUMXZ5zxfv6Z459uKGd
HR6mWXPxKhVYnIshByhwjBCa1JN9zzvwEn/rc59iydTtvesPJLBlaiig4MGEYUmzkENLybzy/uLaNRAhNo9lBZzYf9q677+Sss84toG1NravG+V87nwsvuoDh4V0L5NE8t3YRa/fcE53njI2PYbTmzjv/wqGHHsrNN93M3i/al2NOPJFvfvOPlAYmSZTDty7/EXvsvRyDQ99gmUWDQ1z3/Xs56qXL8dwyV13+W07+t9fQbMc40kbvJW2HN7z5aL7xydvY5+AhSiWPlkrwuls4xiXLIxIBBBolfUrKQxfRWKpURWQ5uTE4gSLKEuo9XWRRizBpUKrUUKqE41QQWReVcoBCkcSp5RKZeAH2llIS+D46TSG3k0vPqRBnDXI5Q9/SxWSpw1wnYWKyw8xog/b0NqZGp4nbDs3GLMKUETLBZDmVuosXmUKR3C7G+Bohdvut5VmEHyggI/A9wjDDiAzhgOtJuiqSNK5x3e/vxHFTTnjNkfQt7aVU6SLKMjp5RCNukDsdPEeSZu1CoQsCD0kHtEbHunjs+QYtwkiBMNbfrtG2usYkSXAch7lIY1hsn2fxv3anza233sadd94FGEZHR+mq13nooYe5996/EgQBu3YN091dx3EdPvGJT6C1YXBwgLGxCb7//asWYGQopsFCcPXVV/P73/+eNE1pNpuYgjM2D4Uopfjyl7/MXnvthZSSiYkJLrpot/3N29729gWce8Xy5QXpuczg4BA7duwgzTLiOF5wTeh02tTr3exmW0JXV41Wq0WW5fzud7/j17/+NRdddBGHHXYYe+21F9ddcy2O49DT08Oll17KhRdeyIYNGyiXS3TCjm0gBJSCEl+/9BLuvPNOfvXLX2HNgFNcxyMz9hzL04xdw9tZtc8BLO1eTuL4RM0Z+hcN8GwzJRhPSDpTHPSG19DffzB/+GFGrpMiy1vTiXLCJC+6F0lu7AQfdnOUlCsQWpGrxKrMRUCrE5OlKXmWFv6gBm00DlbBq4UkTTIQLojnlbAXIlKATelBW09cpC2oWQJZnJCqjFIpsLyrOMVxXBAa5Xn29gbqlRqO54DJ0JlBte05OxOGuz/3wIFOujBV1QiSrIBeEOTGkMaxbT4Kfo4RuykP1i3AwfE9ShVBjwCjBcoRhFGEYxy6u+rEaUKWajKdo5yAJNGUZAmdJ3iFE4KQkiwDz3GQQuIqr5ge2sZRKVt7jSl8GeenvgVGZ21XrD3Prl0OU3N1ql1lZmdClNNB41ohR9Gcz1vHGGU3EY6w14sKArIst5ZKWPSm1WqRJiHbtm3j0EMP4+Zb/oznuuQm4T/+7e2ceeaZtJotfvOb6/AUeK6iHJQs71LYqah0FJk2pIXCvFQKaHU6dNW6EFIyNTNnOdiug3Kg3WkTRjE61yglKZUW0W4L9n7RGrZsGSZOhOVuG41b8hGyi8CtkiQZUmVo3aK7WqKTpvQMLGHtPvvTVa0zOjJBo9UmiiKMyXEEhK02frnCoFthvDnLyOSojRQWGXkYopOEgUV9hDHUu3rBCZmYbDG4eAladBAC1u5zEFlukK7DyGSDMPbxPJ88L+N6Aa6r6EQRUii27xjl8BcfwN33PET/0BDlwOW973s/X/vaRWg0F194Pv2DAwSlKkGlh3KtnzzNicMmCInvuTZNzRiUSan399C/aMDGBRorqtXkaJ3SaDYJw6TgR2eY1JDlCXvtvRdbtmymNT2Gclwcxydst3C8UiFG7eAlHkke0R0sIsqnCZsp6x6ZQKqcapfDYG+VZ5++lSwcoOKByCKcrhK12nIy0U3JTXBx2HeNzyNPP8XmZ+/kmSjHo8bSwX7Gdw6j4zYCB6nsJDqKO0QTGV3dPSwaGERJh8BzcR3F7EwLR1m6ROERsHtuLebtD/OC82hJeAAve9lLWb/+GarVKnfeeRda5zSbTRqNBsceeyyf+tSniKKIJUuWkKYpjUaD5557jtHRsRfWpCJ9bmhoiEqlwhVXXMEll1zKFVd8h5tuupl2u73g31utVpmbm1tYhyqVMs1mc4E65bouixcvRgjBX/7yF5pN6+l77LHHcu+995Km6UIzec6553LBBRdw11138YUvfIHDDz8c13X5whe+CAI810Wp3VZyzPNz/76s/h+Pf7qh7OoKCColZqZDHnvsUT73mU/zmz/8llazTbVWpdFqEAQ+k5MzONLjNSedxJNPPkVeLMw6hyS2hUhJENIWsTQDP7BxbYv6BuxOwAgOPOAgZmdn0Dkox+UrXzmfX/ziFzz6yCPMs2bNfHOqNRPjE+yxxxpqtS6ajTkOO+zFbNu2DYAsg2a4jHd/8BzSZISoM4I2s0xPT9LptElmp7nvmREG+2F4Ypha/zIOPGaABx5aR5L0kUvoRII/3Xwvi1Z4LD/AcNONDyG7y5hyGz/rpb/XpVwtYaRBSIdGq0Gl1lU0HT6ikeC7iuZck9lWB+mV0HoGmUXsu+cqJqancVRApeJg4pSpyRlLDDceeaTBtYourTWzs7P4nkeWZbhCErgeeIJKrYfOnM/WJ1NGtk0zvH2CmekmZeOj/Jyg5FKSkp6eXhJt0I7A6DI5MwSe3fHnucD3S89TgsUFV8/FkCKkJs+gv38pnWSWLG/jOj55pNB+xmC/D1LywD0PU6mXePkrTkWHEYuqgwxvHmdidguVShVjbJNe9hQyV+ROG6VcdC4gsxMALTTaJAglFvw955s4RymESGm35hVs8xNG+7zHx8cWVPBSKpqNFs1ma4HTAoaZ2dni7+zqPjY2Ufxu97Rp/uKfbyrBUi8sVM7ChR4EAe22VZTP7xjTNMUYQ1dXl4W8Kbg0mW2q2p02zWaDH//4x3z+v/6b7du20dffB2JmgW9p+ZT2pfX19TI0NMCmTZuAArbftAmAK664giuusF5mq1ev5rLLLuPss89m8+bNrFixgqmpqeJVG+q1GhdeeCF/ve8+fvTDH2F0jnQUIK35tNidGjE1Pkwer6GrXOah7VsZWrYfzz07RVNP0V8u8aK1a9GxIRhcyqqVHtu2N8jzHMd5L694Uy9HHLqSbGaaJzeNsXOqRaANUdJmtDGH1wqJZyeZMOD5Gpl5pO0pnHqHilen6jogKqiSoadUI+hzePzxKaKog3RSTGOQqHEwORqFROgmWbaFgrQM7It0NBUHWnFYwPAwMjk6n6xgY9oKWHY+XQhtpxzKETiuj+e5+F4Z37VxcwA6sxM+A8TNGJ2KhcLbmQ2JREqsc7TQ5MaGMIAskldEUa8teqMLL13X8xZI+0IoHCWtojcoE8cZRsPk5DRagKMckjimVK6wqK8b5SjLzTYSZLZAq5BSWM6fkuQ6KyxF5jVnBf98gS4gEGY+y95GyXl0s3TpCsYmHqa7VKMTJ0hHIQouojECIVQxaRNIo5GOhXMd6WCMLG5nCt9Kl/seeJjDDz+cb152ATrP+dqFF/LAAw9x2aWX8exzz9JqW89bjSBKc1rtEIHhox/+MBdd/A1c31I3BPBfnzuXWq2GkpIrv3c1Ughed/KrOf74V2IM7No5wr33PowQgrPO/iDf/973abUTjj
/updx22z22wS58NPsHltMKW4gkIuj2UTJg+bIDmZ1tMBSU6VkySBqmrNuykanx6SL9SZOlESXPZ3HfEMozTOzcQVD16XZyent7yHNN23FJXGGnmSZlYnIWN2jj+3Vmp2bRQqNEwPSObaRZG+nYsBaFw87pOUy2DVhMUKqghEAKxV13PcgRRxzEty7/Eoacr1zwdf58572cffYn2bxlK81mi+npBp1OSNQJSdOENEk4++Mf47tXfofR6QbGuFRKii987hyq1S6UUnzve1cRtlu8/vWv57hXHovA8u7uvPPPaK0568xPcMlllyEFHHfssfzlrntwSz4I6+EopYOWGpNrXBkwmY0ROAqVj9tIQjSpblKtVmiFCVu2T1HuWkJbzhFlGlIPN3D50VW/xC1DX3eVydDl1Ncfz2sPEdz64H2sWFVDtzOyTod6XzdTM3XmrQYFEs+113OtVieoVGjMtYg6CeWSx+TUOFESLnAkd68Iln9p64DCJmTs9rFetWoljz32MFu3buXwww/njjtup1qr0Nfbxwc+8AG++tWvsmHDBn7729/a+FfmbQ7//kjTlJGRETzPY8mSJQupdGHY4bnnnuPFL34xjzzyCIcffjjXX389ABMTEwwNDdHd3V1EOlq/6+3bt6OU4oYbbli4/xtuuJHBwQFGx8bmyw0XXGAnpJ7n8ZGPfJQg8BemqEJAueQxR7pwrbquawcA4h+9gn98iL8lmv6fjoEllxmbfXwpyJwTjzuON77h9eR5xsMPP8J3f/AD3vi6k3nrqacyOzfH1u3babc6fP/7l3PxhRdy5VUXs3nzNrTOeed/vJ8NG5/lnr/eRZZCqQyOIzn+2Lfy+pPfQJblhFGHr192GRs2rOfEE0/i3HM/XTSItmn4+Mc/yuzcHBdfdDHnn/81pqYmOeXNp3Lqm99Cnuds3rKJr33tq4RhxMq1e1FbuhbfH2RwaBlqD+gmAAEAAElEQVQoTXfvIpYtHaB/oETUmSOeG2Fc72Jiw5Ps3Pokvb1L2Dnqs/OJV9vJitCs3OdOssoMg70VRDxJ1AJV9jGBQGY5rhugw5BWs0MkM2jB7JykXtVkBnr7KuRujhdB2MnwPI82Ka4jUXmJkoyIjQt5jZHxnfieItMKt5XhVVzmEkFPycHkNbJsFqfkIExKhMSLElRQohE20dMxXn8X1aBGhqIsMmSeEJZKJHmLaiVi+ZK1jO6cJOoIlJOhAp/AkSgnpzWXEqY5iS7RV+3Qbrk41EE3mM4ySEOUK0kchcoSPAK0K5EmQqsA0ATKkDk1DjroNeStGWaTjMmpTUyMb6HsKZQMkG6LKBZI6ZCmMeVKiTgOcV2fUlAmTXOqZesvhttGmm6b5qMzslShEMxOrWLr0y9ldzP5aYSwKjql1IK46wUn/fwV8rwLRWtNkhQWDAZMMUnr7++jq6trAUIeGBggjmNmZmYW7rdarTIwMIDWmna7TXd3N1u3brWKfN9nYGBggbw9NzdHs9lk8eLFTM/M0Gq22G///fnSF7/IeV85j507dnL55d/kwgsvZGJigvHxSeLYPq9DDz2Ec845c0Fgs2vXLr7+9a8zMTHBsmXL2Lp1KwB+EPDrX/2KSy+9lO3bt9PudHjve97D3ffcw91338U73/Vu3vvud7N58+YFGOT2O27jZ7/4OUY7wBk2Ek9DpeuPvPf0UxiZHmdw9Z7sfdiBbB0eodevsbM9xbI1+5GFKY/f9Ax3X5cxMRKhdY4QU3jVm3nz+45nr8X78sT9TxA5EZOjYwx4JVYcOsj7/u09/Oryr3Dln55G54pyyaNW60Ynlk6IFjgqIIpbLNljKWG7w8ZnN+Dk4AqIozKt1iuKD0yi82+hjWB3Q+lRDTT9i+rMNpt0Oh2b+kKRpKJsglCWphidYYwAaTlvWmdIYzCy6Dczu7CookHK6VpwcSh7OZ10t2+mozogM2vNIhwrWhESoy1UjxJFI2vwA5881/QvP5QsERjlEM5upRyUaMeGOGkSTjaoOL3stc8B7LdyiGY2wS0P/Jma28MRhx7Kq155KFPjOxD4aO0ThVPUF5f50Y/u5MWH7k93v8P0TMjsxAgrV6/E98s4jkfgOkCOF1jhH8ph14jkj9cLUA5a+Lz2jYY/3vYNhjdvp9xVQmR6IXJxwYBfigUID1cgtIPILU0ozZYSt9+AkAqv3EWefgPXKTaGgUeWp+SZRqAKaoP1xatVq8RhSJZlLKSlFYOE+Q2gMQZHKdJs3o7FXtuuF6Bc1+IVuaLTtjZtUgiUiklSd6ESKKeJUhnGKJauWMNMs03gO7SNZo/91tAcnebIA17KHmsX09Pl0mnnjD31ILt2buVPj44RlQxLFw3iK8l0NEWFbtbuuYpNG58hbDeLxdkh0xptMoTWOI5LFEXkqbYNljT4voMfuPT096ETzcT4OBhBo9W0GzSxgjh8Da7n2uSq/LvEkVoQfzh+SBpHqEIAKoVCKEsd6equ43keAgg7ISZPSeKYJMlxXOukYCklmVUyFAgFFCiCckgLb+KFwilMEZdp1eNJkuJIh8AvI/gIyhXMzs3S3X8zrhqxU29haSZzUYSWHXzpkOaCcq2OElU6czOYTBBnTXp6FnHUy4doN1v4gUtzLuGU132YKJnikUceolzuYm4m45lnnqZarTI7XWPb1oNRyiXXGcedvI04nGS2tZj60D689U3H8sYTVrJ9+yTnfflzrH/ucdCGJEmIk7CI1rT57rkIwF1DZ+b1ZKkiTVyMvoyXvezFrF//AGNj45x99lmsXbsWx3E4++xzeOMb38BJJ53EE088wSGHHMJ73vMe3va2t/PMM+u55557ATj33LO44IKvFe+rtUKbF91orbn55pu59tprWbVqFe94xztYunQp69ev59JLLy3+3k4YV61axWc+8xnWrFnDunXruOKKK9i4cSPLli1jZGSCLMtYsmSQkZHRIqRDsJD/Zwwrli8vggkUjqOYnJoiikLed/b/8pv/HSPXhjzL6V/yAGPbt9NuvoI4/Mo/1Vb+0w1lV99XjVQOcXgxpoCusizF93xEoWCaN4O1focJUkLgJgSlGNe3556SNpNYG5AOFv7RBq0hT+o0Z9tIhZ2EpcXWQMzvqIsnXcDdC3DNwnSq+KoY6dh/BIcc8RJ6Vq4hia3pq5IBUxMRSgZ4vqDeVabc3UVPMIheZOh5+M8cNfEMOzs+lz5zPCgNWvHKFXdw/NoYnbUZT2vErmKqP+SoyhBbp2cYlTGNnhIrnUVUNg4TH9THVI+LG0aIso8yHt2+pLtcZkpBe6YBsaS712V8UhK7M9RwGJQlKqpNJ/CJpnPiHh8RVBgeHaX53DCyJInqNaQU7FNy6K70Muq3mWi2GPJ6qdJhS5ygYkl7eozxdooyNWbShNXaYcwTTLcjsk5EHtfQJqbmBaRpSOwJTJSTTmbM1GNWqCpZ6jMnIlyZ0usrwsQwjqY7FVQENAQIT9spZtYilz10uR3cvMIeh76euekp8kQwPbOFJN+CI1xM6pDmLVBQLnXTVVFUK92MT8xgpLHJPHkO0iGKc1SeU
g4CW5iMQOcSqTImdq1ix3MnLSwmleqlrNljMUrJBYWenF+EFnCNAvITBoMGJGEnYds2C01obYjjuYUCaky6W1XyvAnn33/1f/qBPZTj/s1Ndp+z9ry2cHSeW589pRRJYnN4dx+7F89/5rATWmkVuIUrgON5xJ3QNgRaL6RTKVeBcfG8M9HGWC6w+zve+YHX4fqKRqNJVOlh5fJV7NG9hF1JyMzUGKFymNhueOR3gpkxy+0JKiGucw2tiad4++kfZu8jDiVTLpuf2MxRLzuSjbuaHNyd8ctvXshDoy7kbRApYTsiiyOEiNFphuv65DoijhO6u+ukWUiSJJQqNdJ0iE7zpIWJn9GXo7WdtAIoVWLlii7caplmKyFNO7iiMIXHTu+ktNuLOErxHYnnCDphRBxFZFqTxBm5yXj7297Kq1514sJkzvNL9PZ285oT/52Km9JOPfr6evjZL7/FVVf9gD/ceD3SUZbf1umw115789lPf4p5AeHSJUu48OKLue++BzBa8+rjj2X9kzuZVVDtKTPTnkJ5HsuXrSRuT/PUQw/TmYzp7xniX055G7M7n+WWp+5m5bK9eNupr2afPbuZmpzAUYYsTFm0ZIgvn/dD3vkf/8LgYkmWxzi6i1bUItcROovI86xodG0zqIVmw3bF736zEi1cwijmi18+jK1bb+Dbl11ItdpHq9O0/EHXJSwaPsdzd/viuhITW7Nw4SjSTi+d9qkox2FoYJCp2a+BsrY1wthrLygH5GlCc2YGU1hF1btqtNudwsljdxrUvOofLM/XpqVZSoRN9ZKFeNHBCzyMdokjzyamAUq1SdJgAe4vVTKqVcXE5BT7H/Ziwk5GGMYsHehmerrDkScex+cuuogbrn8MX8zRXxmnuW47d9zwE66/9ylqqw+g0ZylPbINlVXRrsJRsNdeezAyMkIapRaN8T2EMARdXbiuolorU62WWLbcOkgoGRCGmixrs+G59UVEqEuz2cR3XXQ+SBq9tqCTSLLsCqDEfACBEA2ESTCFGNRCujmVWjdBUCFOE4zRxKG9vsA2sUkak6YZSnpFupF1EpmfnOe5LuKTeR4SZD8NKQSeJ+mEYZHeBaWgTrV2LmmWMjszR73nVpJ0C4YUV2nyNEMGJaRK6evqZfv2ERYNLcERZaJ2A+U4zM3NsXzlWl5x4hKicAYlHPr7FtNd3ZOR0e00W9NEUYvGrEeWVDDEbN4cMTJ8tPXPFZKXvnQ90+OPMTcLO0ebHP3yU1i712Hsv4/HjTf/lGefeZhK0EWj0SLLO0gpbJ3NUhy/Rq4HGB97NUIE6LwGfB1jNINDgsD3McZGSHZ3d1OtVonjmFKpxJYtW+jr6yeKI9qtNsYohoYWLXAw562FhoaGbJMvBHNzc8zOzqKUYsmSJQRBQBzHTE9P02q1GBoaYnR0lEqlQl9fH8YYpqenF3iW9rxWdHd3MzXVYPHiAcrlgnIiYGJ8klzndHfXmZmZsb1WZuki1WqZmZkZEPDpi3/JVRdvY3R0Eq0TFi15jK6aw87NBxF1/rmG8p+GvF3p0mq1yNMQTEJBASfLDI50UVKBTlFS4biSSqlSQByaLG0Qh2EBpVi4SAiBKuTbSkGWKYyRRYHLrMKu2AEX/WOxQd39uuanK0V5YWGZNkXBAmv74LhMjLWsl6ujESoiqGZI1USjmGxO4Axn7KqU6Bmvo3uWk2+cpt1q4yMhd+nkOQPtCr1TPoOJZMf2WVYOVTlitofNIyMsVilHreom1C47NmwgGYtZ2zVA0ozpSj02TGaorhb7m36GR7axfe863TMx+482mA0FotsnGEgo7dSYUQe31GGfxT1syAy9SyuowV2c1OhmV6fC3D415HCNQX+Gzv2K8Z2TvOhlPvv3LyW76TmiQYHsL9PSHV6xdYjZkmA7Y6xpJawJ+rg0mmCRjnlF0EOSRTw9aDiw10U/mbGzlHGfzjhyVY1gDn4jXLpcw6ndZZ4cy7iXUZYqxTvkHjzpJmx3NH7WYJlTY7oV0y6vpC9pEpeqdGQXKtZUlMesCZGOIgxDqn6VJE/xvG5KFZd2K6OV57Sb0yRJkdIjy7hAEiV0eyVSmYB2cH1F4Lv4Xolch9RLyxjZKItEIsGFF1/Hae86gAyPkutZq6EMJiLwdIvuuiKlhI8u4vIMRrs89MQmPvjeSwBBs5myY/tNFroxBmPGkcJGZBoDbpHDbqlnu1vDhc2Z2d0cavPCeMO/O3bT11hodIsCoc3zkl92n94Lwot/5ph3QXjeTxBG2rxaqTBFdrcQBd8Qa1vjup5tRLOM0e3b2OuAA9mxaQtdvs+mdQ/w+Og0Q3vtzdj2LRz4spdw25N3EydHUTjKkKSGN3zyy9xwxX+x4dENxK06Ndfltce9mj/eeR8TzRoPbX6QJ59qkfvdGL9MpzVHV2UAz1dIoTFGkCY55ZJHlxY0W02yLEabjDj0kGoRGHc32UE4xftk+YFSKhqRIO20bC0SPomUuFIiTG7NnzNBnKV4IqBcDoizjEwoZKlCxZEs8h2SKOT2W+/kd7+9ibKy0YsvOfZ4Dth/76Ixsc3Kf57+Dh544HFc38UNXMIoQQmFwGF4ZBfnfOpTNFst+vv7ueJb3+KpdU+hXEWegzPURU86xPTGdXYzJQzKUbTjBgND/Rx53DGse+ZJGjsm+c7Pr+DNr3sdbzvuNXzv2p/h9ij23LacA/bYA01KpzNJaWgRsdFMRDM4LZ/mXBsjExwVF9ZjEqWqYCRxHqKsfSxVtwwolOPR0x0wvmuY7soSktRKnqxR8vw5Y23aKGhHruuSxZlN0BGQ6QSnsK0S2MjQONWIXKPzBLIUKRw7PZTguj5pliKfJ3CzKtvnXQOiuJIMmDy3PHLm+W8CwXyiWkqrGYJ2rVhMzs+TZYFQ2M1EuVojTq0FEbnDnMkYWtzNxFY7wdtrzRF89sOX4blzSBcqQZ0N6x7n8edGCQZWkjQa5FOT9FZ7SFKIREauNWPTE6TSsGb//RjoH0RK6wEJVlDRbkdMjTUZ2bKBucYsWsZkeZuok+GXS9Tq3czNzNLb24/QhiwrMxc/v5mzDgPzCXLIHKGKDGpluZGVwAfp0IktZUDnlv8olbEG9caQpTlSQJ7FpBgczyFJE9tIFouoLugR82vq/EbdIAg7lrMblMrEYUSapERRMUzyFSuWGHoWDaGkT1dJ47kdAn8Qz8sYHCwx11xGI+rmnr/uIspL5HFkxacqZWhxH4IyOnfIU43jzrF69SCl0iqGFvfx7ct/yY4tEyxfMVhwsq3GwHVc3v5vp9Kz+CTOOfu7DAQBjz7+K27/44844JAD2XPtKuJQolQbx0tJwwwpA5QypEaQpBpEah0Msozn9dGMj40jVeGWUTR209PTz6+uTE9PLXwHPK+ZfP7PxhaGG/MFOs9zduzY8Xc1fHTUBle02x3a7c7f3FfBD89zBCVcv8rktMBMptT7S3g1n56ldSuOy6FvaS95YpidCMlymJkz9AytxnElqncNcbge8gzPkxjTYmwkJX9+9O//w/FPN5TTk+NUKhWq3V0oF2ShTrNr
pcH6mNsdjQZSrTFpbkeumQvYfNZS4GF0RJqDjiSGzCqHjSLL4gVupdF2UAtiAV6xJ83uxXu3PtEez1845+EYY2DTlm1kagJETJ56+MEi/JKH6yuk45FmIUFJ4TZSWrtG2drvcdiAYmQ2Ys4kmEzSzBIeFJv44QM7ODBPiVZUOXltDzesG2d2ERxWrvLoE7uYCqcYeEWJ0qo+Nl73BI20yVx/ytGL+igndf4yNkczmMWblIQTIfc5ig15wKGjTcyWPXhs8xidAw1DS33ufnCUZ4zHHk+v45TeRfx83RbUSzSd2YwVT7TYtrlBS6TcFs3xxk3dLPvpRppLBuk+tI7ZspX9W2Wuv28bK/q6OGrPbry0zbrxiGPqdU5ZtpKbHhjHUWX+vdTF3HDMb58Z5Q1HvZwPVHayvuURhoMsK+esWlRm0WSD5TNw6EGrWDs3xR5ZH/s2GtwWjHDckiX0T+c8cdAKJjrDnBoO0Spp/jDcZEZEOI6DLzNWLq6xZGA5UZjTaUe0wyk6DYnnlFAlnzBsUe+t0ul0yGKrdrPNRUZPuZtmIyLPMhJhaDVDXKfE2PCs5f1hC0pjDh58bMf/j7b/jtYsu+s74c8OJz3p5lu5uqo6q6VuSS1aOSGJJATCZIMwwcxg8+IxM/Zgj2c8g+1lzLJZ9hiMDcYBY2GSEEIEAQKlltQtqbulzl3V1ZXDzfdJJ+7w/rHP81R1g430rvUeLXVV3fuEE/f+7e/vGyiBbqK55+QRBj3NuTNbnLr7AJMKxlc26S91iOKYzc0d6sayNxqztVvT2AZv28HaBlNgsAHJdBYdJwGJEa3Aoo2SnyW+zHl47QDu/A3rG8GsuPN/7oad3c/OO/RcyPAigs/NL/5yH9v55lokUrS8WKkkxpmW+xaeN6mCetNYB4SkH6zl2qWLHH/F7fSWDrCS9bj1nrsY5Tv4yJJ0b2d9aY1XnjzFw6clsjW4Vlryx//+PKb6Vp58sub8uQ7bu9v8yccfopyO2do8h5ALeL4RimD3oZRkNLW41uR9xh3db21dtA72QNY6jBBImWBdrz234H0GVHzjN34t3/Vd78V7wee/8AQ//2//K+/9pnfy7nd/NVGkOH/+Cj/9z34eYy3/8l/+Q86cOce9997FBz/4x/zBH35sPsbP+LNSimCl5VPqttB5+1e/gV//1Q8FUaLz3PfKlzGZ5ly9tknTWOqyIZYh49sKiUdifJicX/7yl/PFLz3OaFSEfVeaQhxi7bZj7G1P2dkccuzkKntuj7TXZ3lxhavFhPUTd0JyiezSPh/4rd/ibW9+Gz/+A3+LX/m9/8qZJx5j8va3cebscxy98wg71ZThFHKpOD+9jmk0xFNUY7CFQAndIlEGHUnGk5w07XAh72OFwhuLUtD4kvH+EK0zDI4kScjzvE3ZkGRZRtXUNI2d241Z79HKIbyktu2z4Bx705KlY1+H4izD7edRUmAbx3Qy4cixUywfXmPrYoiV/eZv+To+89nPMRmXgY5iPJ4aJTy1MYAEGaIuAaQSNFWN1B7pLMO9Cd2uIp+s4e276WY9EB6VPsxo9z5mba/1ozmnn/0tuv0DXD+/iV31bFxPufu+W7j9jjv45Gf+kDT2bF5u2JtKhlc3mVaXiPSAMvcU5RZpv0PRRMQDz0JjmRQ5tjZoFBuXr7K7sUVT15TTnPE0XPNIKZS2oDxax8Qqw4oVsoUK4x3FtAznsg6RuaYOdIAQMWpZWn0jzr2l7TDArS97kjKeUg4FW9vnA/fe9oMNW99irUE50P46sTH040U2dna44577uO++k/z+h/8AZz1HT91BURQoqefiwJmhtlIKZzWCGhFJemmXa5cuc/D4Ub7u3e/kiw8/yOcf3sJYR6+Tsb6+wN/80R/k+O1d4qhHJjRIS10psiSmLmpO3n6CT37m07z/V/4uhw/eQe5KxqMRr3rVN/NjP/JPePKJ5+glaywuRJh6StmUJPEgJOzZD9HtGcb5Ht6n6EhRVYYsilhbPUGmrrK+lLA5bLjn5a/BnyzZn3g+89DHWF6JmE5LqtwRJ2nwWm48IpJgPNZ54iihcsGZ5saiRr5oke7n/7l5fP6LBu2/4AVf+TD+F30JCBn8QfsDGAcrJCklWgu0DRHCWkuk8jgUdcuTnH+SDWh/uTvG1DVCeJwBY6akSY9SfPk7+mUXlFI5alNx8q6vw6UVxSinMSWmmSJMQ+0t0loiIzESUmkRXuK7Cbqu8GWCVI7v/Z7v4swLn+DM2ctcubaI1x7bTLFGo6KSpegcsaiwTkElqEyJlAERstYSxxHW2jm3JmRXRjgfDLi1DHwOpSQGkNGAxgny7Q0av08UJZh8l5F3qLhLmhwgSgbIrGBJdLArCXHa4QudTfy9K4hHOkSR4Gi8yPf/33+blf4uUi9z8PAqz26dxj59GhWfJIlSihfOc3e3S+fkApMtR35qzEq8w11HFsin+2xuxaTScHKhT3J0jWeeOE29a3j5kiFLFI+evs7Sm2/jxDuOcvH6lCt+m+WdXV711uP83pcmPO4v8tWvW0fZAU+sXOD57Smb1ywnv+YQ5t4lfu/JR8jWVlk/ZTi7k3HtkV3qpYLkjT0ef+gLbItVrmwPefORI/wfz5zjU/sVrzu5wEce3aSqarwf8ctf+gSvWV/gn37pWV6nB7xitcdjz3W42uRExYS3RSfYH1f8eH6eWwvJX737COXZnMe2BHfde4o3imXGnzvPU9sXeeCvvA3/njfy0U89wsXTY5ZWDMPxJnm+x1LvICtrS0Go5UJkWy/p0lQSfIRTNaUviaIMVIS1DVHsoAmRj1lmsS4njaP5KkNKyWQ6ZTjqMPJTpgiyxrFyoIMfeD772LOoXHCgV1Fe1PRXInb2C0rb8NnHc6wPhHnnJb2FHyGJ+ySJ5sd//DAve9VxlhdXuf9lB1vzXAFOcXl/Gz3dZzIes5enbG7t8PnHPsvzL1xhvTMgU5p9NJ1DB/nMpx+iLiRJtEjtLU7kdGJFgiRqCl77+pdz9PAS3/2t30qZpvzSf/oVPv8nz/GJTy9BywE7dfvnKcbPMBkHPmBtqmAZUdRIqanrGmfCBF41Nd/w7nfTX1zgV9//AZJ+htMSJjmmLKGlBYg4ItVRGy13BDeSc4pJnGXsbe6iG8Hy4gIXLl7kSxsbqE6P47ccJhYZH/mDP6a7fBjZiqAQMOjFSOlJBn2K8YSqqen0+uztbdNNexw7epiNrR2qcua7qDC2wbtZjjlzZMu1KIw1BqV1y4Vr/VLnA30oXG677STf9V3v5Ud+5O8ymTgWlxcAz59+7LN86MMfRQnBj/7o+3jb21/Pn/7pp4FQOP7PP/IPcM7xgz/4HZw+fY7PfPoLN4pt73HOoqWk36ovb731Fj7/uS8BHq0lP/TD383f//s/zXd8xzeio4gkTZA+cDHLskYriTU1XsCb3/QmPvaxjwf0Q3ikc9TFiKYyLHR6bNa7TIqS1RPH8UQ8f+EK3jekaYdbbruTenHI8nOn+fgn/oTrl0f8yLf/CL/1e/+ORx/6BErFvPAMbJy/QJPXXDr3LKu
3GapGI5s+hdlHq4SmscEL0dYISRAyuJrpcI2iqPB4lMx4zWteSz6UZNKgU7CFnfMlZ5ZBQVMkQuSos0gXvHG1VtRNaCFZHzoCyhdMxlfAOpxJiRUkvYjhsMB39pCJxDqHU5bhZMh0WqF1Oyb4BqFjhIgQSmJNaNMb2xAREacRSseMd3Y4euQWltczHn3kOsJ5yrIMvErjkIJwvznH1Wc/x+pijBYK4TW6UkzqitNn9jh/8SF049jPd5nWQ7RZJmKPuixxqiFSCU73aGzBwQMHyXe3aaxFikBVSdOUyXQUnDkQxEqzuNLHmKq9f0P+e+PAUGDsPsomqNZlQ1rLidtu48DqCp3erXzodwCvAidXdhCqi3fB7sUQ8/RnPkoSxTgbitZ+vw846rGjLCyemsGBdcAzmm7T6S5y250vI8sGnDx1K889cxrra8pyElTuLbrsWgRZSokXEtMUOKkYjfdx3nLs5CnOXtxisHwC73cDp1NFxFGXVB+nmyxgaairGhEJ0CmVjphGBU9dynn23D4Hjx6n34+Jk1u563VvpHvwdn7s7/8HTj93jovnL/AD3/duXnHPcWS9z/7eiL29Ids7e/QHXcb5GEQP1+a6Ozyf/fxD3HqLYa23wrNPXmC4nWNwLPcUadLBNB6l6oBQNoF3rZXGlkNQDZFeYdQ0eK9fVLzFUZe1tQHj8ZDhcNTSMW5a44tZkemQMsX7RW5wugE22uHKIsSNNwVQ4kZHKhjeh/NvjW0//8+Xa0p5jp84BTrF1qYFO0LBm0hBpGX72RYvQPkbwNyss6OVQUsYJOX8GJASYQVlNf2LO2v/ne3LLii9VzSN5fLzp0nTIU7dOPBpY0gaCwoaa6m8pWjKYC+xr8lkilFj8kkPleygdUw+kYzzS6RZH2cbss4CUkfQCIytaYwglSlp2gkGr90BZRmiAHUcBpQZcimlQgK2rue2OtZadJKxfOAwSbZOesuUvd0RO7s5Ds+hwwsYX1OUOxw9uoizPRrliFxMM/UMT9xClUfgE7zV5AU8+NiQ4yfX2LiwwZkzD3Pp9OMUwzHpwcfxUYR0QYThS0ssEnrrA2gK/BciGhwH1g5SignufEH87IQFlSCiBQqpsHaR+PYTmMzy3Kf30WKVl7/+FJnS7LqM9Td7/koHhkXJepyyejzlFe8oqSiwlSfza9z1Y29m9+o2yWbEXa/oc/t9Y5T1rC+t0n/FiCPDPV6ZOdRUQz3mr6SWk8tHuXhhTF5dZSFOOHTwIOc3x3zPG+8k8oL+wkHyjQ3YnHL/nWtsTSY89Ox5lg6+hcXFlBfKfZ7cK4mO1Hz+4mNsXtwPN2qyyJGDGcOPfZA333E3r+mcZG+0wbA8wdLqGzh/wXFl9wz9foZxEl1neO9D26tpSJIIYwKfSXmBShOyLCBoSnbI85KFhTWm+4FfFQjGmjSSjMZ7ZEf6aOMZOeh2lykmu6ykMSJuUMkCBxYto8mQlfVVrFtmd/8zWNswSCPq0iKjiP7iAkIKjtz3euLVHgcPxSAUxkiEMxjg4Moa0eIyRinuNI5/+S9+Fpdbfuaf/mMWVhYxlFw4/Rjf/9d+gje97a3c99pX0+t3GF68zJGFJd7+Ne8kl9DxwZg3HSRYC8V+yYF+j8OnDpF8jkCC954H3v4Obr3l62msRSNovMM2FYI6pAx5jzEBWajqmvFkiNSa/+1v/w3Gw30ub19jd2+bJi8p8pxiNGG/ymnKhqacUjYZ3jbMhpu6yrl2+QrXLl8lkQ5ZljzxkY8E/vE738npp77E+/7uj7F50fPFaIrSDmsFXmpkFIrB2hpcY0myGFtaqrKgjiVr6yvsbm9TVQ1V1bQT5Q0xQOBjt/Yd0IozQkvP41uhk5rvq5QRr371vfzpn36SyWQKZEzGU4QU3HH7CX7oh76DXrdDt9uhqur52Pbxjz/EDJb8D7/066F1Ou+0hqJ1Viy/+S0PAPDgpz6PdQ6F5Zu/9T382Z99mtF4iveeJMtYXFlitLOH0hqhBNZ5nPX0Oj3uuftl/NQ/++ct900ilcTFFVVRMDi0gr12mo3tkhLLwuqAtJOxt1dxcKnPodXD2Ds1/tV3svCxz/D4U0/xn/7bdf7Kd30HDz74ZxTRiO3yMkm8RqEaNkbXGW2UdBc1wiu66QAhQ/QpOiVTaRvE4Cirbf7sU5cw03cjlWK/cnzk01c5dCKiSWMyX1Eig2CKMMYGZNK+2NJMa6xxgV+JoZ7V/LZkNLqIMxOOn7iFW0/dQT7OefjBT9HpCqrRNDRSlQrJJs4SJTFxInF1HHiSQgUlektxEe3iwpgaKWPqckhVF7ziFV/FLXceZ2n5Gh/9fYfWEqUkkVNMjaHb69PrLvP2t72bo3clfPQPfpfHP/cocbQMdkK948EruioG6ZGxJ/JjpkYSRX0qJGXT0I8WmOZD1gddNuoJk+0ClaRBoFeVCOmJ0oi6rHDWIqsGa0Qr+nM4XxHHGu8VkRrgZYMQHluGJKwrG9e5/c7b2N+bIsUC1jV4Z5lMdxDs4YBaSq6cfxKXbyG7fYrSo1LJ9niDw0dPEntN1hQoKSmmBgxY3xAnEV5F1EaxurrOaXF2Hg+cJbp1BFDINlULQKoILQ6Q9iM2N64xrh1xpjlwdJntK3ssLURUdUirq2vD9Y1LJP1dalvTy2A4ViwMNOcuXODXfu1TWJtx/OSAA8duYW+0R7NXc3b7NGe+dJqtvTGDpRhna+riOnvXQ2EcJQ0be5c4dOQ44/GY8aTEudBFEEIhvOW5p8+wMwpF697wAmm+hMsq9jc0B4/EqASwmjhepHKT+aJRqE7w4VWi9Riddb9+AIFh7dg29z9wgL3da6wsL4NvSCKFVKAj0DYl6mjy8YRff//nqN3XAv2bKymSVHLk+BQdh9a6cx4tPeW05NqVq1SVYWX1AFGWEcWa7c2r5CMD3ImUMd5H6CjQtV75mj533ncKESkund5iPNkl0REWw1u+5iTrR2O0AXSEL0oKB7s7lo/98Xm8DQKsd3zDEguLMc89eQljGkCG8cEpslhR5F8WfRL4ShBKGeKhinJMXm7jUAhrkL6mkTWTJkzq3nu0D5O7lwJrcybJhIVkldxXmNqjhEfKPbqJp5OWxCJFxAWVtXSijKIckmUdYplirCQVpuVjKqy3SDzeh6rdC2iaChVpIhVgeq3CRBuUvhHWCKLFNRaiNXQvxwnPzvY+WI2O4OqlbVQKgyilu7LC1qXn+eTpCUIcZLi3jvcO6RW/81//mOWFEbvNNZazHpqIsajweyXdZI0kjRkXEzqNZtJxFBslifAYs4+XCXsb1yHqU0630VlEVGhymyOilOWeYVhKRtOCW1bWKKRid3OI1YbEhRWtcJ7ClCxHC4wSy4GldfKiotgdI2NFt5NgrSWvIZICkYKpYxYy6CYxE9ch7kk6UhEtrjDcu8YjasTiYAnBEme292iunSdTET2xRNEx5BsXObJ4nO6xiodGJd3+QY6+/BiDxT7dRcnlyNEZOlYPLqDylOPvKCn1IoOmIF9WLF7d4e7bXk7vtSusrq5y6MARPvmxz3
Hl0n/kW77+TQxHO6RZhBER3jVUVYFEEcsEiWL9wCq72xtsTTUrayl1M8bYkiTt0NQaRYenHwmFJkCWpQx3dtnOQ4xmqhK+dOE6fZdxcEkgo5oLFyWHDgkgpdjO+f0Pf5Ktx55mtR8TZxkTZXE+FLreeCabOSs9C+YAz5ZT+j4OVknO0qskk0RS1dDzkr/6v/4wv/yrv85DT14m6jmO37aM7hwjSeH7f+SHOXTLMVLh6CuFs4GXtqRakZowmNoRxzGVrTly/6u5UA9I4nOYtqhJ+wfwsSBOIpQBKX2bptQO/rScNRWel7IsSJKEWDtiKfGRIooUTREWfNI4tvMpyuRExvPk03u8/79W4BxeCL7um/4GB5dqlPXsFTmHjy/xwAMPUDe7bJc5X3XyXfgrQ4prAu1gcdlRV3DffWOK+jwb56+g0110R9Ms9TixsMLx46uMnSQynle9+uV0E8VgsMLG1lWeePwp3vrWt/H0M09y5cJlytxw7uxp9s2EyThnvDclSntkWZ8oPoytX0dZVfT7PTQp8qaxT0iPVCVg+Yn//X/i//g//wkbG5t867e+h4MH1jh2rE+SKPqLitUDWZhQJHjjaWzwz9Q6Yjoas7K0gK1q3vXO1wHw3/7L+1nQU4Sy3PGyO7n7npfzPd/zLfR6XZx3DIcjPvKRj2BMibWOunZY53nLmx/g8194hLyoQIXIuzhW9AZH2dp4los7Jfg1IrtHZj19lYGC9fVV9rYN0/EuSawRA8WJd70FIQRfePo0H/yDT3DqxHEml3d4672v4Jlnv4iuBXa7Yri3D+tLGDMlz2Kqekq3m1IU2zQ1KJUwmeZ477nr0Cv44lZoM2sZ8Tu/9m947WtXOHngIJvb14njmLqu5ygyBL5voChJBAZnG4x3AX3VrfmyCPSkFI+IMr77fd/D5595hgde9wAXLj7P9pUrrC2sUEmLkjEgcZbAY7YOvA6dKk/r5iLmhWySJAiRIiXsXB/y1je/lWzQ5fEzT9HpHgzRht5TVTWN3GnFnAIhEooowUY9sm4P8OhuhZ3mdFRC6ROmfoqWApNHFMqTJg5vhygSnJSUquFlX/Uq9vcrVo8e4dQtp/jMZz5LknVReEzd0NQNEhlEeZ4QdxjLkHvuBd3uAKmDyntcVRxeO8DW1U2ee/4sx++8A5+lbJzZRukVMA4RJUQ6Ih9NEMLROIGp91lcPo7RNb1uQpIkHDl8gv7gIF44KldSNA5x8TG60RLXNxsaUdBfGtBNuljjkVT4GrwhRGxaF7oxWtAUDXEcY/wEA1w8nxPHGeVkj+l4xHB/if39XW657QRXLw0C10/A5Z1dfJrQVJK9/U1KG6N9RWP3INqn0xPsbRVcvbBB0o2ZFptYUyM1LCzHdBYNpoy5cO46ZlyyP1SoqGaaF3iXYk1FEmfsTaYtSh6oFn5ac+bxZxGxZm3pIPmoohl5si4sL60zzYftfDNCS4UxDo9FqITaGrRULeI5QyAlHsWVs0fYuZpQV8fROgReBEpuy9FtuxpKZjTmHcCNqN12VKIqPS+cfunPZ9sRALZvpl2yNv/bLB+8qcN897kHSz734NNzzWjYwmL513/5NGF9LlpePwT6iW8XzWEh/rGPTIhiiVT2BhrpwVuJlQ0vZeH/j7Yvu6C03rQk3gIVT4gbg/EOp2Jin6ATh5Mh9zOyjtIYFBFdlTDxNVW5j7cJUoKrLL72jHauUY08iV+koMEKS7QsiFRMZRqM8zgXclOdc6hII73EOdOuTIOZbmkMygV/wqYuQyvMhcGoKiqm5S5pZwEpFoN5cGrBSc6dPc1iv8fWzoi3vfH1HF06wH41YddPOLASTIGbcQekwXl4xavvIqm3WGKJrAtXz11lfVFy9NUDDi9mCJ+yv2EZmQib5zS1p6wzeoNlynIXbIawFTo2+EygSFgUi+TlDs4fJEkqDomUQjhi71noJkjZZ6AFSXedzeFFumoZqQXrWrG3cxXpOwxW+vS6KcNhQVNW9PoRRW3p+AV6yYRRlePSHq6And0Jo2Qfey4Gb1Fpl+v2BYQZglih24fNRpKP9zl08CiLETx7/hIkkgXV57I37G2M8LHHpZ5OnSCaihxPHWviGoy1ZEmGs1PGDTTuAwghWVvpcXj9Trav7vHGdwlEWtJlHe9KvLHE8YA4XqQ2Bi8cFscL1zfQUUoU7bO3ux0M2elTTxqyLMIWIdFAyADfrx9Y5JWvPkDpoOendHTEIxd2uPtUl1pLXC257VhFpFdA1Gxd3uXdX7XC4Ju/k7/xI7/EcCqoag/e4oVECMsumuLRZ6jHW5zZ20XsKo7eewJByrSe4CrwEkSd0Bto7n/1G3joE3/KwoF7mfAyDq4v8vO//sfs7e2g98CicNKQxp4sjrCVpaMUTmkWfcUffORT2KjDp3/td3nquQmVOY5EUVvDxvXLRFJRC0cXTeUtsQpJ6cY4Yh0FcvrMHN47iqJAD5aRTQNaEAmJlgohgmWOSFNQkkxIbLqE0DvMEidWTt7C8VsypIRl4fH5mEGygEBzMpWoGio7IVINn06u4b0g0YKX3z/glffeS9rRXD17moce/xI7Fzf43u//evTKOtNCIusCRMzC+nH6vRWOyoY3fPu3s7e7xfHXv4xuHJOphFhDJ5GM93d4+vFHeeLRz/OJP/sEX3zsCzT+JEppFhb7TCYFjz3yJf7h//MT/OZv/C5V5bnlyCpFM6HTSbF2RBI3vOF1r+HZ504zLKdY55gWBVWbu2yMRSuJtDVeKpoqp9+PWF7tUeUFx285DsBTj34B4SFKO/zUP/5nTOoOIPihv/6djMY7/PYHPxTiVU2FUp66MVgneOub38yHP/z7lKVp06E8dVPw2Be3WV0+zN7wYRK7zWhzwtrKSb7pvd/OZLiBM46qqXj+4nPoeEA39mztXYXDx1i5OObCc88y3NzglpO3cfXZDf7B//aj/NTP/Be+5m1vRInrjCeSYdGw1FvBO0WR15BJJpMJUiuipYyqcfze518gju8OY6hxRHrIa776nZw/9whq1AURUEipQm6xtQEpbmrTFqECZESsFaW31FYh25abU8E799777uF3fusPue/+B3jqC4/zNe/+Wv79z/88i6mEaY2ICXxlrYOjgzfEUUhbiqII4cOiKZ8GKoQWOhieG0O32+XixfP0lw8iooTGKKIopq5qVCQh6kApqFuF+pOPP8Vkv8PVi8+iEDB12CgDEbEgM6zOsE1OL7N4NIW3xE6HRCEhkD5if2vIgaVbGO9v89jV54j7XXQU0ev1SJKYxcVF4igjijLSXspwf0xd1xhjGAwGyChQVfKiZDDosTeeYILyhqTfxUiJzrJwP9kK4SXjyQTpCbnpKuKWkyeImg3K2lDaMTpR5PlVtjeepsyb4O/rGvZHUyJ5FaET4kZx7doWRf8y4+k2nWSZo4c7we/YWjqdDouLiywsDIJoKlZkcY+qnvKFL36BZ5+6gjQrTPdijp/qc+niZ7jrltfy/LM5pqlx1nDx3A7b1zZxzuDpEOsaEWmqpEYdGXB1d5NmPCRekZhmgkhzU
hGB8jghqZ2hLGOMVyyu9dHZmEHvCC+cm3Dlyjn2hyOGo23S5BRTKYGaKI3pHr6HQ4uniKTg3pe/k9GoJDc7dPUKD33uY0ynkKgEbxuqpmnTjnxLnUpp6gWypI9HU1UKWJzXQlXpgZS65iWF3Is3IdJgRfaXbnNZ4Zdbjt20hffd/O5Z2zwEh/j5WB63CWRCCkxjcW0xvLu9h9YhRtWYwF/Hg0oKbJUF1O7L3L7sgnIWySW8wDWSmhgx68/jcDJ8lLYKjyLRQbVUeojRNHgSHVoYjWvwtSMBUt+hIid2AucsSvYwYp9IdMMkWRuEBIcJA1dj5kbTENSpxjmyRCOkp6jD63SqqPIpuxtniaKE569HJCsJC4srZPpW0q5jtb9CNdojiWI+/8hpPq8fYTIuiXSHUydPYuliXIWzoKVkvDVly2wyqfcR8YiFA477H7iVu192imLaoJQiTW7BOIsxNToKfBdnGwa9B7h+fRPhBGmWUBRTJpMJQiiKfAHrHcaEVbhrPEmSohAkccz21i7D0RXWkz51BXGs6fZiikZhXB2I8TuaA0ckvd4ao+keSoFONHXVx7tFstRhjEOKJfJJh6Z2JEkWTHWdoMpXsNbS6y2g44SdWBAniolziCQiTfqUzjEd51hVcnjtAMI70jhDISmwlGXNaLRHqhxp6pmMSuKm4Oj6OmVdYsuC00+fo9uXSP8A+7slebFFrzdAe4EtA7LR6/dxiHAtI0ecWJaXDjMc7eG9o9NR1FVASnQWzR8r5+D0ruMdd9zCc49e5AMfPUN3ZYX+suPh33yEN7zpXu5/0ysZFzWNjNndrylPHmP97vv5g9/6EvvTGkFQPnpb45mC97zw5FOc3HmIy2uvpafWqZdKnn/0SVaXOhxb7jBYSKmdJZcFopSoSvPqe+9jbzpCbD3J1XMJZ7s94gxOn7tMv3uIwUKHvf1NpF8g7mh2qyFvuPVWfvOjH+Fjn32C73/N3XQe/hWqJyom+n3IQY+k22V3vEeyHUyjJ2jwDYgGJTt4b9tnJAgmpA9ihbIsqYebYYI2HmeCMXucJmS9LNBDXLBg2d6Q1K2/HcCFM89RTHyLGgUunLWWTqfXcpqD9cXWRrB3EiK0uYeTgms7Izafvc5wb5dI93jD17yKc1cn+Gs5Wdoj5MVvs725F7ifrQggiiKa1rjXudA6TXoxKwcPcvDgHXz1Dz7At3zv3+Rzn3iK/+v/+iJdp7GjCUjPufMX+PXf+BC/8Is/g3Oep774JX7zl9/PB//bb/AzP/3PGU8nXLhwgVhJOk2D9I6ut3RshTXwV7/3r3L+7As8+vDn8U2Nx5G6DvlOwf0PvJKHHvx0O2gHEZN1hggZihER8IxEwJIKYon/99//Ej/2N38UqRRpt8M9d9/BP/+pn2KxF7WctDABnIy36OplTrzyOFV5hHMXrrKzf4GHP/0Z1k/cgl7okq6s8uYTR9jbbzi3t8vRgyc4+ZqYr3nve/jtf/6v2C9rFk4e4Opjz/DGB76P244/xrf/wN/F7w2p6imFrSnzmlgKjGmw3jPoL2KxOGnpLvZ4/rl/zcNf8EgERVNz5fTzbHzpAqurq9STCUVRhPGwvc+8txjjkUpgkeAt1pUIHxGpkFJkPHNB2HjU8IY3fAPjpuCFzXN0Dy5wbXtEt79OMdpDKUUxmiLTDuVkTKRSGuXAT3DG4mVIkpJCI0UoIvOJwZjAWSyLnCNH7uXYoYNsi5LtC6EVjyQgV02D0oqqKvFlzrlHHuLcI1dBSVQUYbxCmmCzVeoK34YQ1Ba8b5DO0uBAabSMMcUYbJc7XnaUxx7bYnV1jWvXNugNesRRSj6tmEw3USoKyTN1ePaKIvAcJR7TVKSRZtDvcn3nKvU0J4u6LHaWefyhJxhv55w4cA9+HjrgEd7iXEBoo1jjrSAadFDK0rVdJIEaMuj2MN4gZPtaucjVq9e5fOkKuvDIakQlOuR1zFvf841IXRHHMWkUI1rBk0MSxFaOWiviziLf8777+aV/9wucee4CTz72MS6de4Jb77yDg7fegn/YIHVEFGm6q0scP7mOb71KtfRBiS8EG498FE2OzjrUvqCyHid1SK+xYJwhVRrUCKMEa7fchdnaY2F1gf0vPoo0sLp+gLxoqPPgzSuEQirFn/6Rw/t4XnBJ0cXTRUeKve03UJY5M39T33qc+ht2GzinUHJhPrf890C6/5E+8ivT3HylxeSshBQ3OYS8+JsFrlXkh+Oz1gY6ifdY23ouYyim+0DgyKooAA1aD9Eyx7Xj/pe7fdkF5WwTUqKjaN5imx3ATJEqfWiBzHKnhQ98KBUp8rEJPCkhEAoaZ1HWhAHNBdf3qqnpDJJg8kvgbsxUlkVRtO0NP+dOFkV5Q/1pPJHSwXBVMld5G2OobUW9W1Hs5yjfJ13tsHDwGFcnBbF0eFHT1BH9foTSlrwYcsfdh9nZVtSNBetZOjhm7VCG0AkqPsDCco8jxw6zuz3CuhilFHujgl6vh1IpZW1CRBAJ+9MKEXfC/kuHUJajR1baCdpTNhPSNEXJiDROwrHGMaaqOXDHIoPBAk1TU9cltQlcQ+cFUjmiJKLMS7rdLkLAcLRHnAbxUicb4JwnH48DNyYNULsSgqqq5qKDuqyYJdE0VcVC/xTj4YhOkjKZTJjuQm0N1nSCP5kHKTRxnDIcDmkmHiE9zi2B1zSmoCrXqHNJf5BR1YEXK9QxLl56gQcfepjVQxnf9C1fh3WGnekEoSSDwYALm9fpdDJ84okGMVNTMsynyKQh0j2KyCPSBuXBJHbOLwTLlefP8uu/WXBl7yp2YYBDUY16eF3zn37lt9kY9bl0fUQnjijriqzXxVQjLj/3BK4q8Xhs7RCixptAUp7uXKJoRhy0NeN8wqTY49DRNd729W9kdC3n6cs79JMlFhcbep0+hpL1Az0iPJEvKEehUNqZXmdqupw+fZ1HnrmMd4I0iwFLPVF84uo2z79wllfeeYJPvf/foOQFjt13gmfP1BS7+0zzgp7u0BQjXJRQiWAirkUT2uXGtL6T7b3fctq895hRjUoUaZagE8/zF85w4NBBBD1MWVGrMBRMJxHWLjHzejXllCI38KIiwuOKSeB4vbDF3vYOawfuxdl4rmLf39vg4qUtyqoAPJ1+ytb2dTySKIrZl8FawxiDHmuyNG4tPxJcrVpbmqCwzjJFYkomF8/z6FNnKJ2j3+3QJEc4fMtJ3O4ezfVdWFQgJR/+8B/x4Q//EZHKWB8IEuX4k9//I/7wQ7+HXuhR5wWiMkgl+cm/8/fDvezCtPlrv/gfCfFrYW7RSCppqYbbfORDH0DPxEoyWEJZ4/BtWwwP//GXfp1BNEVi8d7yv/zgD8/RrKqo+L73fluw21EzdXJwynidU8jdHKsynBTcf/triLsZRgrEtQlslRTVNfYHKR2tuVMLir2CRy88STe2rMkuT7/wHKOLl3jFy+7hH/6vf4f7lo/yud/5GLoT019eZHV1mf6BFfY2t1hZXgFj2Z9MqLwliyK4PqFA4bzDGsvS0hJv+9rvx4oSZSq8C5ZDUmjy
g85jOlYL1/ksph7M0KEyXQyEygZbI+c98ReImRwOAjH5WGMPbzcgmWIH9FP5wPJHenJnCXBj7wsRa0kVqsrIoVLuw6P3VlPKb2kKH8mnFsHbdvRth06joiThK7rON6b8WVf/mW07X1+7Ef+HQj4mv/kt3L68AG//usvMZ3uYdxA37cIL8iLhEiD9dGILgbuljE9koe7ydiYFhUpNs0qKBrjKc44mqYh0preqLCoGblYfhSNYB1SQuQtTR0Wkw4RUnXimLrpAiqzeIDtWtIopq4qDo8PqJsVdV3zFV/+1Tg8q77hxTsvcvTUIScnJyRMgym0jhj6junRhP5s2BXcxkMnFPPogFtKIicWe82MPoCCJIrBtkhh+LUXPsMq1bzrne/E3D5h9drrRFrj2p4o1lgVFnJ7xWx3hnsNNo1J2gGpLLfNEutDJnjXrpHTks/du8v7nn8fmphPv/A5nrp1jU+/8GlskfNQWbI4Qw0RK6fojMMKRz/0ZHFG2w3IOMIpSWMHnPNoY0bD+HAPAqFItx4tJYPQqEShOsu+iEi9QjUDx9N9/s0P/zD/9kd+jOe/9K1cS2LMyuzi6RjRdKUkaRLT9T3TaUnbtmMyjidJUrx3SKmI45jv+q7v4mMf+xj/4B/8A5555hn+8l/+y5ffTd8zn88BaLt2Fwm6W/SOHLDtauxtb3sbf+Nv/A0A9vf3+Vt/62/xZ/7Mn+Gll17Ce88rr9zmmWeepiwKHj58iNIaG7yu+Pqv/3o++9nPBl513/PxT3yCt7/97bx+7x5t14Qs9cc25/1YTMKj5s3bNnDEbl2+C10Ix7BFH5UaAzZsQPbjJMaxoWlrJpMi2OP5kG6yXoX2p1bb5KqxmLMW5zxpkXI0naLsHlrHAazQCdEYGqJU+FzrDHYwOw781qt3i3AKul0hulMOu+DZCGDNpS/iNjrSx2EcsoPdGZ9778FbBIJIRiRRgogVTV/vKE9CJEipaTYdWZYRaQ0+YrWUSFXSdkvwltXiHKh55aXPMS0VOo5ZnPQgJV/yzLMczDP6VUOWRBzMZ5yfP4uUMJvNiKKMLM5wxjIvi92CrjeWYbC85T/9/ZRliVKKs4sLFm1HVS3J8wlaKpxvKPemdNYS5RHDg8DVLyc51SgC9NaTpjld51CpQsuEalVzcDDjK778rdy/e588Lnjv88+xrjZ4J8jLgqIo2dR1GAOTmNXKUZQpZnCsVhVxkrLeLEiSlNl0j/Pzc5TqKPIEaz3Xj2/QN/WuqyOEomtqLi4uAm1CC5r1BSQRWklQEWdnmjQxONtjup66q9k72OfsdIFoEtJoYOgtQmmUF7i+QkWeTKcc7x2ikph1VROlEuUd5+fnLFdrbjx5nU23YZIYTl+/i1IRs6IEIbg4XX7e/fPFtjeOUKYp3nn2n3gLN28+ZjGAY3AJyShO2aIurXMMDFhrWS03+HQP7H288eSTFKmnJFGFlQPxyA8xvmYwGk+HHUAlPV2vkfIyO3O7sjRDMN60zmIGQRQPY854hDU1eRFxcvoyn/mXvwHSM5nu8+M/+lH+zb/6MEplRFFCmuaBYzgVJHFGkmRjoRjTVhPOzp8INw/wC798m+v3QztHq9CmyLKMIkvQyhNHaYhWnM5J0zy0ZKuKzWYToHsXBh7rtzfsFSTRXbY4BASvv7GocGO8nRACVLAAcd4i3JWVqRTjYC9QKr4cFGCHduJBXYmE2ra4nXPEo8G8Vo8q/3ZcISkY3BV17ZX/iZEkHNSpQQSEUGMrOqCxToRBWEo5DuJjPSc0W6hMIcA63JXCTIjAUfECuqEPqjUEHhfU2JHg9/6hmPOTKBRsX2Dzu+Jyt/uPbALByYOBn/iRBgEMA+A9Og7fxdf9roi3vGnK6bnhIx/+D7z+6mtEUUaRzRAKrDUYY1mNk4ZSEdZdpsEgBZFQ1G0QXr3/y95PtVjRtS2L5ZLzs4ek5SwYmvc9eRqPLUxL29V0XY91A1G0BGcJ3FRF49eo2KGjgIJnmeDd732GO3c/zG//bb+DT30qcHaeejLjmTc9w8Pzz3Bx8Tlm5QTTr0FAFJV4b4mTwBs2JviixSNX2owqYmcVTlq8cWRRjmss3npUHAVBWZThhgGlBEoHzrTddhGMpWNsJY4qU7zHm47I9KRxQm/7kBhHhLGG/aN9fvtv/1p+5md+HiNSfvmXP8aXv/8rODw85KWXXkBbjYjSgBibHh3FfOPX/Q7+/t//4ZEP6Km6lg9/9uPI+AxsgyCEAAAkUYxpG+SY8NPnGXMxpXn1dRIzIOYZsRfEUqPlWGQMltOm3V030yhD1AOD1Din6EURct739vnP//Sf4nMvvsDZ2QW9t/zAD/wgH/rQ7+WkfJ3IarTKsW6gtx1pHJGhiQbF4Fvywz164UmmMZHWJEZwo5ijrUeOSNm2BSmQ5OkkjLvOYEVLKwWx08TVgBWKRoDREX3dM5tI3vmmd9D+wk/txhTnwj38K7/yK3zbt30bTzx5C+eCUvXh6Sn44O5wfHzMjRs3SdOUJEkpipKLiwsAvumbvumRe2q9XlOWJWmacvPGTV6/+3o45zuKjdgNAh74U3/qT7Farei6ju/93u/lf/xbf4tXXnkFpTX/9X/1X/Ev/sW/4Mknn8QYw+npKfP5nNlshvfBq/fd7343UkrSJOVL3/c+/vW/+lcI4J/+43/MfD5Ha80HfutX8Zf/9HdxfvqQNIIhHfCixTuwNmR2I/pR4ABCyoAq+SAGbNsWJwacJNjMeIk1AzrR1PWGoekxQzCHX9cSqSzWG7wTDKajsWB72AKm1ni0TrHOk/UpJ0uD7wTT6T6TfI9pMWUwYREtiJAyQhFzcHCAVoq2DRSdLMt2FlLICX0/EEmFjgStCM/RUcLQO0S8DSLYEoAsyjMu0iUhm5pRaEpw5bAdxoZzF6t0TH0RrNZrhDDhOb7l9PSUqj1jcX6BswbXOTIVMckFe/OYdzx9k8G0nJ4/BCx9N3CQHTGN5kyfvMXh/gGbwdDYhDiWdF1PZwUqSzm9OOHO4gLrA3UgVkHYm01LlrdfIIoS8jynbdYkSUzrVrQ14xwd433M0DY82KxwIkfYDGHXoYsxzjNOOGRv0IVmcDVJlqKjnDw7YG96gNawX6Zjx3Wgr3tcbzk9PUWlMb0ZWG1akA3rTct0dkjf15iVYTY9orE9Tx4cMCnLkOY3GKTwHBzsc7HY0A5hHHvmLW+i7xzrVfCOdnjMYOmbAZVJ1uuaRAVhVZRMuX33grjImExKYtsxmJr1wxa/f0icQWUsv/rii1y7dZOHJw8Yug6MYVqk5EXCYVHS9EvKWYIjYb1cguuZTffojSHZv0wH+o9tb7igvDhb4ZzjX/2/PgbyHG+GcVIPE5xjNJjetghG8UNg7wqwMaZXPPPkMSf3JL/yC6+iZEokM6zpg7pShKJmcRGjpMZ5gW17EBFDb0bEbVRhAXYIRYqxdscdcdbRe0EUH9IPwdLj/e//EqJEB7GQ1oAkzWKUDko7KUAQoaRiG/YulWZDRBJvETPJ8dEeR0dutzJUhAQCY1qsVRjb0PYDZxeL
UR11xRJCbXl1ASUT4/J8Gx6/RdXEF+Dl7Abh3bYVWlySp7fPZfcZ7B57vA3lRVihC3f5ukffn0c+Hx5VNj6umLxKUN8WoV9sf+AykWL8pB0FYMdL/AIkYKXUroO9ez8hcCIIPG7cevSYHz+GL9SKu7qfHkkcbXlSAXEcu0scHgniuOJtbz3m+Xf8Purqgqpa8y//5b/i7p173LgR8rZnc82DB3dxhtGFICJJEmId03VtUO16xYc/fBstFXkaE0URKqq4uLjNpCiQUlJVYX/DBObxKrRvLeBd8CDcXoOu7YmSlL5peevb38pLn/kcL99+lTe/6a286z3vBmDvxiGf+dQnuHXjGptVyKKO4zSo5JueIs8xfXt57nwoiAcnxus4QogavMMLQ9NviKMUoRR1W4FwNMM63DdeYrtAZZE+mC8HtDqId4wIEnDnXEjJUpr10BOhkdLh/BDoDl7zyst3ef6d7+Wzn32B1WrBm9/2Zp577jk++cmPIZXDMRAnMbP5jDTJg0Hwrr0Z7qGyKHD2nKNbT7C/d8gnP/kp8rykqWo+8MEP8NY3v5mmaThfnnL/9du8+OKL1FWIUFMxdL0FQrZ9GiuuH+/vrp+Luy+jEcziguuTI5btQBRbNpuan/jxn2e1XpMVkv39KUobdGSpfY31PTf2DkmdIFHQ1xVWgNDgVcrJ2VmgDESSVV8zn85waUIrwsLZi9C5MabHmA1u8zAQ/63CNAqTW5zoSTxEWUGfSJzvSSPF13zdN4AOaI6a67FtK9g/PODll1/mB3/wB/mrf/WvYK3lF37hF/m+7/t/ACF68dVXb6MjPbo9rPkn/+Qf8xf/4l/iW7/1W/nwhz+Mc2608QkL0jt37tC27djiNhhr+Y4/8Af4zKc/zc/93M/z+r17IzFE7ArX3b1LKOhefOEFPvLRj/KDP/iD9H3P3/ybf5PACdf86T/9p/lzf/7Pc+fuXZ577jn+0fd/PwA/+qM/ygsvvogA/vPv/m7qquHmPMWuznZMyt7e42wpqOoFSZxRljMsBB9UGRSwSqldcQkxi+X5riuXJvlOMNnUA4MNnoNJDEMfujZxlCBVEhbuImW9OSdSMUWRjara4C9b5JNR6W4oZ1Oc8azW5/TthqrahKJvFDFCy50HW/GRQYuY+Xyftu2RQmPdwOHBDZZnFc4K9vcPx8CIAa2C/d4wWKQYC7MkLOCyNMcMA2Bo69AVirQIYg0XB4swFYSWUoagiyQOCXVZnrBYnHL68D55dEapBjw92V5OqjU3jo/C/e4Sskzw1M1reAnFpGS1DjzGmzducnZ2xp37D/B+oG4Mg7Wkecb9Vz6DxWC9waCIdcRiEzjWD87vBteXocHVFdpnWOtYVxukKqi6lkVTcX6x4cmnD2iqlq5rUPsS0UsG0+1iiLMkwdsI00vSbM75sqUxgvXQszw9QUce7yVFltN1DW3XjG1uQV1XDM6ghGd/7wZJZjC2ousumE2POH94wo2jJzhdr1m1jjJzYUErJRf1PYahIUkysljxa7/+KWbzfZI8Q6qIvetPsjhfUuQ5OvKsFyucVIF3r+H+ay+R2ymdE0yymN5Ijp55itPTc05ef8jgPV4LPnfnBZb1koP9OYvlKb0+RHVZWGgoyWazQqsQfuJ8z+L1JQ7FYDafN3d+sU08Xhx80SfK/9LDJew/wkuhuBNi5HGNwoQrP2+fb51jNpuBd7RNG+KfRiUagpAT6xlv4pa2adgaGgdFt9995rZoDUbJl8WFH03Wtzyw0FqGa8fXUTratcK2x7F9D2D0sHuca/fo9u3/WcbNJ9SOT7NFCSAwbraw9dXHd8dvh/Fn9Ujxtd2uWgFdLShhLHjE5c+BCycv/+YvVdKPH98j77EtHLc8Onf5WVeLqy/02i9WYD7+/C9WuMkrX+zjQKL3fqfev7oJ/+hzHv9OH3n9G1SiXf3sq4Xwyd0l//Kf/yoAw+DBP08UTwHBh/5TzZNPiZCfPpLcb9445hc+8rP80A/9EEMXLIyuXbvGM888wyuvvIIxhs3m8kYUMiDbddPuUDslgvl8kmf4PqD7gzUjN3J0sttahOBHGoPdcWur1RopBJFOcHbg7W9/Kx/9xV/gLW97Kx/4qq/mo7/4C+R5ytNPP8nP/fRP8f73fzk/+7M/TxJnrNcVwgd0oyxL6maMME2CJZP3dkeb8H5U2eNxLqRcbCkcxoQAAmHY2V7pMQq1b1riUQHq9CUfK/C2wr16aSwt6PsGqcIYcPOJp/j0p36DN7/tLUynJd5I3vy2N/HLv/yLlOWcT/zKr/Le930Dq9U3YIzF2IFv+j2SH/pHP856rcdh4iZa/Vt+34e+gvP1hrt37/KZz3yGyWRGVVX8X//O/529vT3aPnhtKqW4OD/lzqsvo6UgSxKGriUvUtygyMuM2dHe7py+fu8OUaT5h3/37/PqZ18mmk3Bdtx64ik+97kXOTw+5nf/7t/NS6+8zNvf/g6KouCf/uMfoDp7yJGKENYSRwltZ3FJWMiuug3vfs97eOYtb6I1A+XeJPDbYk2IqpUhRSoOC4o0moAPySoqMkzTnNbDtVtP8Pf+b3+P//DvfzLE3XjP9YNr/PHv/pP8wA/8AE/de5HvfI/acRH/6u09Xnl4SjDuvryXlBIjZ/jzxxb3Bu+5OI4vx+Nt5wNwI6XmPzbe+HEluW3NbwvP7bj16Pxw+T4Cwf7BHsvFmuvTBNWsdkfxF/7o17N4+5swpuX4+DqrTQ3CkcXZ6NsbXdKqVMTFxYL9/X2ENzv/XSEEeV6yWCyIkoQkSYnigJzFUUaWTrDDgLUdWZ7uPGK3wRNaByeTIGxsg1COGu8EbdUH946iHL/DdORwtzRNaDELFE3To2QULGocOBMoQVmcUNVriqIg0hlaZSRxgZQDRT4jTSZIqXccfq2DiCqNUooigDNmCG4ciFH85jRe9WH+Q9ENBj2m+sSJGj2GHVplON9gbMPQGRCOzWbFMFh05LG2o1pfkKURTb0hVnp0i4A0m2P6LohZ/JZvHRLkIHSxDg4OMP3Acr0in5SoJEUpxb1798aOo6VuK/pBYLwhSjTWa4qJ4Fd+6dP863/xSxwc3qBqz+nHMAvEgBnCvFaWJYtVw1d91VfxFR+4jlae1bImzTM6oyjLkvV6zWw2YbNZjXnwsFxeIJWjyA4ZTIuQlljPwAm6fsXQNfgEnEnox+S6vek1yjxFSIcSOVV1lywtWa5X9KbGIpjNriHQ9EMHCiIZ0bcdXdeQTxL29vZYLWuytKTul9h+9BwdaoQcULFAioj1xhA7T54XFPmE1WK9uw71CLShhtHre6SXDSC05b/9s3/vC7f/HtveMEJ5cHiwQ9i26rJd3NXVbWwP71oa4xjgXEAX66oiThKSNButVrYGrZ6tj+NmE9SfWjIWiiMlbitZuFJU7jYfBhQ3EpC38Vzb4tJtLWOCSmJEYi733YSY+Ctj0vZv288JpOit+MN7NxYn2zQENw7EZseJeAS5Uxq5fecrA+Hl7n/xwXk7YErE2M67fA/4fPTw6t+uvsfn/X6l4H+8IN1
u0o/t7fFv9rH32r5+Zzp+5bOFCKRx7xzmCxzvFp0VQmD74Tf9HhRXzrkQjxT+gZr4+df71Ulp25JXjz4BN351Qsorl+xofSW2/CWF8xIdSZwPXqnnq1N0nNI2BmcEWZpQrdf8+q/9Gk0TbLGyLKNpmtEfVGL7gcEaBqDMcoaho2kMgxsQ43XbNA1FWQKSwVl2CRPW0Y78YKFCIWbxmMGQJjm9HzASnnjuGd7yJe/g3r17PHHzFqZvufPKy6xWG4oi2KQMfU9ZZMGyKk+w3iBFwnq1oe1qijLwmLt2uBIgMFIr9Bjz5gXW9nhjA5dPSbo+JFD4xtJ1NVEcM6z6sbgL508qFcyRtw4DBGcIM7bdJnkRIkOjmCTPee97v5TXXrtNXqZ8+Od/iTTNSFMN0nDziWdY/prH2AElNF3dUZYT1usmNEZk4Gv+6x/7MTKlaKqK/UnO+eKMb/7W30OZZ3z2s59FRTpMmiIhyVKeftNzJFk6FmoOKQxCOdrBsDKXV15xeIs3Pfk0X/nBl3n5M3+fiZLUfc7FomX/8DrvfP495JNDfvqn/znnFx3PP/885+en/Lk//1+TOMn3/K3/ibqqiJRiaCsYDAfPPcfXfsd3hCxvGdFsGpI0oq43JDq0JJt2g9t4tMq5MIZyEsRzk/iI00WNdQN5UvG1X/0V3L/9cd7y7FM8fHhGlk54cPdjfNl7b/Jb3ncN7n4EIUax2NAxtpS4WpUJdoPvlXv3C96iX3Db0moe7xqIKz9fDryfP66Hse/KmH7F1sdzuV9fKM/bc7nQDsZMl0rvLMnp0pKiuMYwDEyLPVBgupZr127Q9z1lGQyfvYPZk3OapkPKjL359eBzLEPxOJvuY33gWTcbh45juq7j4nRJHMekaUzTrLGDJk1Dik5AbDdkWUa1WaJUaItHcUzd1HhnmE4KnAtCyK4Pgsm6bciKBOuCfVcc68AnbEc3jEjTDw4ZlagoJOosVqe7xV6SFNw78SiZkKXpSG0Q9EPoME4nB8SblOVyxWq1ZD7fI01KhmEgSSO8zQMHVMWUkznWheunbSze9/SuQ0vQcYTzlijZwxrP/uFRGBt6SRxL5PFAXW/w8xCpa+0QWs7KUlVr0lHUokawaTIXo+F3z7pzLM8roijG+4Su7kMRRMS6OidONB6DN5Iyy0nihHpjeeu1p/kNfZ8oimjbMEYgNW3fEmuBlhorwIuQ6R17zTufexsvfPrjXMsSnJEcIGjPLnh2b05b9RRRgVCSuq45OniCHosQEksaOqzW4lRPkpX07Zw4WdK3CjELsYl2WNINHd4a5pMJaTqlqoOeYTYrMcbQddVIb3G4QdOLijyPmU5KnIPV2QVSStrNKfPpDZzyGLOidYY8niIJ1AwlYkTmSZIcMwims33SLEIpwcnJabCqMsGDUgpJrCdgHdNpxhvd3nBBmWXlo2ONh5D1uLv1efwJV4cGpcaCwvkdEVx6ueuKh7bjdgUb4ty2qlc/pvIgHkUQrxpBbB+XUoZ27pW/OB8I69bu3nYsOK+81rP7vEe38PvRNcHBdbDbglFJFNuB0u/QHMbjuYqmhTQRh5NhAv1CKOQXQvauPi55FJm75D+5z0Mrr77uUfXy+LP94i3qq0iiEIItfnSVU7kt6Lz32HHQ3pnoPlb42e3nXDlPu31028/wo4E0nz8pfJHZ6+p+qyuTyRdGOwIa8shnP/4853a7veV+7orosYAKiLfEWcGwtjz97HP88T/5Rzl58Co/85O/SNd17O3NOTkJhu9NUwUDYG/RQgbUb1zodG0dVJjAZrPh+OCQb/7mb+b4+Jjv+Z7vwWN3k4BzjjTOSKMIoUdS/Uh3iLOUzhl6b5ke7PGBr/mtVFXFJ3/1k5ycnCDxPPnULfb3D+l7s2uVn1+cgfcsVxcg4OBwxjvf8wxKKR6enPLaa3cpigkAWkZY73YuAZEMKL0zhigOZvvCDxTTCcYInnvrW7n1xBP88i//Mu981/N86Zd+KbdfepmHJ2e8+uqrGGe5f/8+Dk+a56PAwOMZdguTxfKU973nbRSZ4slbN1iv19y4FXhzL77wMkIIrl3b41OfDAO4Fw5jKxBBoDCYAe8daZ7gvaXuPDrNeLhY8Du/8Rv5Y3/iT3L3/n32b97AS4HseoR01F1L3yhM3TL0C7TQ9J2nsUuEinaBBRDswuyiCnyjoxldV6E95KLky778qynLko/+9I/ygfc/yzd8wwd46aWXeN+XPM3Fyavcfvllvva3v5vJNEZ0Pb/6sU8xjeY8++Zr/OwP/d1d0lTT1XzZV74PSw9KMstnnL76gDzPiTLJ3iSjedgyL0v0xjMIQREpVvc/zVPXjvhrf/FPI/E0tqL1NfQRB/vvQf/yx2nvCbaj4ff84a/jv/mXH6FpHaenW+Nswd7ccHb+AKlkiEV2Hq2iK12YMQVm2y0SI3LpYTINqSLbsSMszMJk5b3fBSMEdfElT3zLSd6igKGg6oOv5s5TFawLqWnbe1Wp0bvR+VF04/nD3/Wd/Ov/z//GH3rTdd58/pP4sStjpSaOU5p6RPRyRVv1WBs8AMXYBhVCkCQJQz+w2TRMp3O61rBe1/RDSxQFT98oCQu0pukRXc+kCC4o6/WazcYGxwuj6Duz48VNp1PqusLaga4dI3wJPo9SaKTQdP3o5GF78jzBD5rBCZTKSJQiT4KryjAMTOcJ2IhJLvF2oBka1usKqRzd0OL9QN+uA1IpOow3eDx37tyjLEu897R9Q98ZVqsVk2lBZxRni3uhYGnD4sYYE9ro90Ro73tBMZ2E8U0YtJySpjlZlmBNjO0FSRTOTZyVrKsOnCXPc7SMcT4dTd49yisme0+FhVPfhTlm7CymGlAO03eUE4KYUziatiIWniht0W2DNS1ZBsXBHsMw0FQbtISu17x+9wKQ5HnBxaYHHFEUOjxBpa8Z2gEcSK84v79B2ZiD2R7LRYuSA3iD7XvSJEQkqkiRTAq8t8yiKW1bIUTEatNSFhKhE4YevNScPdDk5SHObWjb4JphfUhGWi3WWNEHoaoTVJvRrzrWaO1ou5o0SnDCUdVrVJTQd44kyRFSMFjHg9PXmU/2aPqWYrJHke0FAaZpA4JtPX01YGlJspzqtKJpGqRwTKYZVbdBRzFni1Pm8328ktw7OfmCc/AX2t5wQfmt3yFxTozmmVwiJ4z8NtuzdVR/dLJ2u0GimJV84md+jfnBAU+8+Vm6psdphbhi+ZIXGa+8cMq//Tf/K7NZhB3A4XCDGHl0wZbGj/F4zo4KWi9I4oiu79FK4bFY5+hbx2//HX+AJ597O6vVCqnk2F73+JEbHkQ3alekuCsFFkCsBIc3FFeAwPBcQTBv/wJIIVwWWd57lPzNC8irA+rVYk+OnMF+tNV5vMCSniDSuRJ3eXXbPqaFBO+3ns+X+xFO5vgdPHp8V/99vDW1a195jxQSd6UY/0II5nYW+IKf4fyOV7l97u49tt/FYxzPqxm8V4/z81DQL1ZAPlZo/2aoS9gnjfUNnjE5aExoeurpt/DBD36Q1XLgJ37iJxic2dkrbVX/N2
7e5OL07BGz+W2aUd00vPPd7+Iv/qW/wt27d5nP53z9N/4uPvrhnwfnSNOY48MjXnzxNt57EqWxeBgtg/q+pygmqLahWmz41V/8ZT734gu8+W1vZf/4iHe9+51UVUUel2zWQaE5DAN/7I/9MQ6vHYIIXM2nnnqOZ555JiCkRcFf/ot/hRc+81myLKfeBOGcH6/RSAXuURLFXCw33Lp1i//2r/6FIB4ynv2jY7Ks4Bt/37cTxynGGN75jvfsEq5msxnf+73fy0/++x9HjosxZzRpNEWTYZyla+Hw4Anu3nnAz/zMzzGZTPnqr/lKmqbhzu17FOke145uYG1LMJb3nJ1r2tbiEWMiUEKzKVFiH4fG+4H9g6fR6jm+5//0g0gdBW9bZzG1wPiOyaRgsThHCIfSEEWKs9Mzfsu7v5TX79/BDpdZ3u94x1t4/fw1Yp/xjd/4+xC94WD/gDTNGAZLXmQcHu1xfO2AJIl48rrinc+9n84N3Dp6kk3TkGcFkzzn+Xd9LbHyIDxlMQGhGIwLhbxUtH1D07UkUcx7PvBOnDMMvaBvLbMSVCQxg0EMgNcMruG1exbJQBwlDKbEUxInmocPLUenCZMtIAmcPdR81//+v+C1ewP/9ocz/Hhevuqrfp1/92P/hOnePut1xdBZDvaPMMaxWq3QWoYFhZJjHKrk+s2nmE73OTq+xvve916atuL6cUircYQCUmvN7du3geA1ub0fsizh4mLN4vyCw6MpWofC7979+yxX55xdrFAqQWpB39fs7e0hZcxyuWR/PiNOpqw3p7hBs2nOOV885NaTT3Jyeo83i0uvWGvA9JauHsjzlNX5AqlinBHggi9uswmdBTeEMSBLcppqA8JTb9ZoLamazWh5FvyU+75nMpmwXnU4K8miiMFYhO9JspihbVFSo6Tm4YMTsiJDolE6QgqNED15lgCSoTOkURkK367F9BFg2dKqIp2yWXdkeYJwEauLlijusX3IiE5TjdYTkjjwCr33rOoL6soxneRcLCvSLCaf5HQmJM9JIen9knQCKha0ZoOVG5wQeAVeBXCoGxYBoaWn6zvWp5ayzBG+pG3OKcoYe95hO4EiQgJ5NsUogXOGLNJ0r3fMJvs4Q7CQEwIV55ghWAdev34Ta4dd+IcxjkSWaOcRyEC9EZoyC+IsH3v2ygSExYqOroM0VxRzR1X3JLMZD05WSDVGQPce44O4M9YpURqRxgnWO3Q0MDjNZO8pjBVUrsMnOWkRo4qB1WpDZILfNBic6RDSU0lDXW/Y2zvixvWbXJydkOkJ00JTNfcp4p7IbmiHiqNiivOSfmjAd/RDjx8MyIGmMkTxhPnsEPTA2fkD0jyjHXocAmMTVk1HHGsuTh/QNAM6Slk39xm64Fd5cBCSg4ytSZMYZ0Xw70yDWDPNshCu4gzWDix9ixscWaq5qAYeLu6hdULbVl98cnxse8MF5ZPPbJXCEqRCimB3aYxBoIj0ZWvj6kS+nZAHIJtJyo8vefK5Y24+F7N42JDMcugNwkkG07I3k1Rrh+cOeZYzKD2ScT06GjN9uUQCDQ5vR98oLfF0aB3TdQ1xLOjajmm55E1vjjk9FyRJPLbrLGZnfyB2CFTwfd4Wy1t0UNC7YAOz3batwC1/1JgQK3i1gLnkGrlHkD/vfSjAHmkPXxZRYtfSYedRGEm9K6webyuDQDg7do0ea6NfQSqFEOjHXiukeOR8fTFE8CpC+Tgi+vg5F4995tXXX/2MR34X2/ff7vyV7wuP9OLzishHNrFti139DvxuweD9F3/tFwSHr3KxJHgZ+IpaxBgTovm0jqnbjpdfu8sHPvhBfuqnfoosy66Y9geT45OTE2aTOVES8s/rusaOBsjOOb7lW76Fe6cPWdYVRsCHvv3386Fv//3gLYkOTgL37r7On/tzf47F4gIpFdPpFIUg0hGm61EqYr1YcrE+4/z8nMXZIe99/5fxw//mRzg7O+Obv/53sVgsMIMjSVK+5mt/GyrSrDY1KtI0dc/HP/UbaK2ZTSZ815/4Tv6Hv/bf8/DBPbI0RngBMkL4IIYriglN13Nw7SZ/8b//6+STkvbhQybTCcuLmvuvX1AUBauLc7z3PMSA98Q64uHFgv/dH/nDfOj3fAuTMud7v/d/4hOf/A2EUDTtBik0dbXkhc99hv2DkijqmEwNXVPzqU9+kqeenvCmZ9+MIqQ9eIIR9C99ZGC98hizXeBN2Nv7EFHkMcOAEKCk4qM/Z/HsE4/eqjrSBM9bN56T49H0f3steX7ipyOcnz2yIPrMSwEFQ+whlEIhsC5UaWH95sHvj4vvASH28daH5wrw5OBlWJBRIEdqQEDI7UjNEYBFEOOJ2fLEnYsuRycRCmrvVXgPBJ7JSBG6XCzJceEpRcxvERHfdO3yaP7Dz6b8/E+pcYXNeFyWV+/eBano2wGJRClH1zdEOohNmqYlijReSNqm5uu+/ncwnezz8it32dQ9Zvxw4yw4j9YxQ9cT6wglJGma0rcdUqsx2GL0dtQqIPfHx1SbhqODY7q2Jo1rmq4n1TlpnNE0XfC4RdEPLUJm1NUCa+Kdz6L1DtMLRBrQ00AhGejrNfNyn6peIlzDbJJjvWOxuCDKc44PypAQ1nVjl6Ijz4L9zjQKdmzRbDqi1p62bZns7eGcoGstUaZG4U5CmkVMZzmbdc1gQQhFmU3w0oc44yEgsoyxwThDFqe0vcF7i5CGuqnG792y2ayJ45g8i5Eu2A8J53BDj/eOxWJFHOXB2mazQUSWfqiwLmQ3N22FjoKoSJMiREysY3pXMZvcZDAd3o8WbWKKEBKVDihRoiVYAm/8eO9GiHWUgihWuKFlfxaxqYL1TVoWLBcL5tMZm2WFMhGJBtcNTPKUrl6wXFc0XRuSz6IQljH0jk987pe4ef1oFMF0dHWLTwRplDJ0lvOzDW0zUBZzyqxgNpmyGc44PNyn6WrS7ACpo3GhENHdO+fByQV5ntP3w+gFKUZhZEw/DNjOkRUp0NE0kBdPsdp44tQTDylGOJwa2Cs0ph+IlGdTrcnTIIiyokXrgV5EfPbOA+rNKX17nzSZkKY5R5M9jg5v0HU9m80GKTXx5JC2bXju3W/i9bt3EMIS5xG9tXStJUmmHBznxErRmIGqGeh6g0oEAkNlFOl+gfMK4kMePnxAazxn5w+YZCU6nSIE5HPNarWibQX780MWiwU9AfioW4sUMcZXY+CFx/qwCNDRF592H9/ecEGpfIwSgN4WTYGgL6Un1o7ebQusMMhxpaDy3hOJnETFDHWLchLtI+bZjNZ4YhHhNCgdo3SI9NtyKKRIcRakNmM6BJiRSGtt8FDUKg6DXt+FInPnUxjaX+fn5yNhWWAkl36MY5VnbI/g0phcjCtZeaV1rbQAby4LpyttXYQYhTKXkVdXt20G59XvQ4tLhXd4T3tpLv1F2tZXi72riKZzl1DD1Tb2I+9zpbiErTHuWBg6N05eX7ywvPqe2+JwK7DYbo+/9pHWvhg/w30+f3RbvT3+Or89fkBdQW+3+/NI8fzYd779u3jsdVf39Sp/dEus3
h3ryA8OPCxBEpfBpgpBHFtkiDAI6EHT8eSTT/It3/qt/PN//s85OjqiqWsGb+nHttlitaQd2p0RfhrFrNdrfuc3fgPved97+ezLt8nyHOc9D87ORsU0u9bi3qzkr/wPf41mteHBvbv84+//JyEFRErwQRF8cnHGu9/9Ds5OT8jznLsvvcTywSnX948oioJPfOIT9H3Pc295Mx7Jiy+/SpaXwYJASvaPr4ENx5gkEf/j//lv84/+53/Ij/+7H0PFHmcMwksEmsWiQcUp/9kf+eO8cueCuw8+S1mWSHFBFCki5blYbpDSIWQoYoQQXDQrokhx7+QBkRI8md3g+S97H+nsjGvXj9hs1sxnB0Q6J0k1k5nm67/5G9HikE11wfPv/wqmxSFJkvHaq5/A+ZsoHYU0CdpH7hspIU4iIh2Q3rA0CdwWMdpXRbHismErxwVTKATCo+Ff4zqU1p93oUVa4wm53jiPswNSKoQL58YTYhHD60I0XbgUQ6cE6RGMSSj+Ml/Z+bCvUow0HufHzkooouXIMDI2tH3lyCcUo52EtQ498oa3yUlCgvLbWLvP38LaNES0OUJHaLm8wFiIohQ7dGjtGYYOM4TrMnAkJednS778y7+cLC346f/ws8ymh5TTfdarinKScXFxwWw2o6nWzPb2AnUiVugoFMRd05LGCQaB9w2zWUFbDziryYvAn5vPjjk9fUisNN4MxFkwbbaipywnJKmnqVfk+YRq2dFWUehWOcPQGXxyOe7EUcJeOSdNMuZlgVA91gRrsqNnn2NSzkK7WGp63e+8hZNYkkRjepB1u9Q2pbZdr9DN8N7TtOuAyBf74Rg3hmplKYqSqqrpTE9vq8D3s444jun6IIiw1rJaLVFj9nOUBrHc4INvr8SxXq5Hm65AJWuaBus0eZ4GbuWwYXOyQEYabST90GNtj9Yxq8XJLjRDoCiKCZuqoTMda7HEmhAb7BzE8UBVVZTFDCXO6fsWpQTCS86GDiEjoihiddHT2wEvgt3QZrOhLGJAcLK8QNgYrRVZntB1DX3fBVeVNMX1Fp3E9Is1rR9QKiL2ivuv3ydJQ4BKXbW0J82Og4rUbOqKZXMWFoNCInXGpz7zaYQcSKOSrmvC+Dk/5t6DlzCDCy4SjPcTEOmY3oRAiCxO8Azo1POZFz/NgwcPEV5Tb1YoGWN9RaQ0fdcEL2atKSZzUCC8JeWASS6xtmc6OcIaQAyIyNE1AoPnXu2oKtByju07mmVDFCXc/djLJCjOF+ccXJugtKHeNOTJlMP5HKRhIhLmc4GQodNkmp75tSeomoq634A74PjGnLPlhmf2FTIyVJseJaYMgyEv9rGu4/TePeaTMsx/SuNzR297BsK92ZseYyxKKKJH/JV/8+0NF5RN316aWo9m1+FfjR+LOjVy6bbu/YLLibvr1+xPb4BOSeI8WF4Ii/QehwqQ/LjK896jownImrbdgI9QKg7EXWvwwqH0mC89clKsqfEurPzDfgqUCHD02elpGHBH4Q1wpYCQqDHu7jI3ezuIO6SSY/EWeJ2PFifbgsqPhtCB7wNXiyM7TkzRIwibcw43XHKLwmS2/b7kaCoePkuNKS0wIsI7uFOAD5F7zo8JOf6y1b4tGIORMHjsDnUJFjYmcIq8wws5tlyuFml29z7Wmh0tYHt84WLcghp6F+FlzKMCHSEE1gVzdeQWzRwRSxVa8cJdqoS3+yhlEKbgPU44UGNxbtl9b24s8rfxUJd8zvAdBWNgj/GXKOmWC6i1wjkTTLx1OGKE3L0WAcKL8JlAEof3DibNQYhlTEdRxtw7O+ebv+1DnJ0/5Kd//N8HNFNr4ljR9TVC7JFEDYrQ2qmqBV/z1V/Ld/+JP8Irr9whieOQ20zw9BQiRF5JIYjSlE3Xk80m7B3u8c73vYcf/fGf5sH911AMeBsR64j1csNnP/MKv+Wrfgvr9ZIHD+7x9ne/ha/8yq/kIz/zUTbVAovng1/1NTQd9L2mbtcMQ0+HoKs7uqalrZc09Ybl6YKua3jimWfo24ch0UqXSAlZ5pFK8EP/8/cwmeZcuzkhijIQmps3b5JlCUqPdBgn0Eow389Qsme9bLlx7SZFGbM4vc1Xvn+fL//Kb8UaTZnNabuaerMhjmMQjtPzB8RpzeH+MV5UdO0aHSWU01OOjg84fQje2d0CYHfyCJwhoRXO+4BOIVBKjkBYQPc9wTrMOU8cxWMRNsZVIsZFazQqoq9s3mHHmFmJQGpFMhZxW7R9ey6ts2ilAb1bwFlnMSbct1KEyMyQ+T5SSqxFyIA+urH17d322g/IpnYKqTQQkC5rDUKEaD43GojvnMn8SBkSn4/Ke7Zoa9AaewdP3Io5X54gpcfqDZ0BrSCJc6qmDRniWnF6vuCdz7+bp555Kz/2Yz/Cm970Nu69fp9Ue5zpwEUsl0uuXbvGumkxxtC2LWmSh3Y5IdUFoDk7DU4DbUOWZHRdQ6ITIqmQqQMKynKgWiVor8iijrY2pIljaC2RlagkYWM7irIj0h7TDDTdBc6xK/xiJVm3DbNiDh4W5w3WeoqiQPqIFz73OdIso+ssRV7SmYBCNhuB1gPrakXbNaHQqTdMipKyLLEmxtg1vWnpzIAUEVVzThRrlHToPGbZrPDCMjDQG0cWF0itybKcWHk8FhE5cEuECEbw7bohSVJ6VdHVkrxUyCHFe4WzDc5AFCVEQlBXPednKxCGPM8ZahPMz2MwDgZ6iqKka2usHdCx5v6D1yjLCRpF1/fEuaauKlarjjQtiFNNYxb0bYfSEttJ4ignEoa2WeGcoR/WmCHEWTZNw3qzYTKZIUXE0IXkNGMbsjyiadYk8YRIFgg0g2lJInAuIs9zhARpIBEp1nguLsJ3vXcckl0enp2hFERptIt7VkohZIxQHqSj9xVRMQk0H1HR22DI37UDSZpS1Q1ax8go5nB+yDDYoMR2MUUSMQxrfv4XfpS2vyBL5kSxIoknJEnwJI10TJHnRDrZJcwpmaMjgdJj/LSK8SZCmDBDaSlBOQ6OZwjh8aPHbd93IYqxFxw89Rzee4ztSSeOuq65s+pp2p7BKowxpGlMW/cY01NXL7Ban3NwsM/QnSOxCOnpmpa66lFacHHxSdpuzbXrTyKwpGlMP7jgmdy2zPem5DKhSGJWqxXkOYvlmtYZbP/GIco3XFB+5Qe+LBBcm4a2bWnblqZpwn9tHfKsUbsCKFJiRGOCoCZJMqp6QdcvyHI95ppa4lRhbQ0iDpOCCvYD1g4jAipw1u2KWR3poEJSUTBWVorBBtHD4BxeujEfOyGKgvqqadtHjiXYBLiRV2KQQj+Ctm1RuKvcyKst2zARBQXg5cB82dp9FA3bcgCD6fj2/f2V97PWXrZst5MRfpfw4K0NqQ4yDPhbVbz3gNh+nhvb8PKKYXkotqS6bFGHIniL4AWzeCGCZcD2uGHLK91yIv1lKsMW4fB+LCa3iLVFqm2utBmLrpD+ErJns/F7YJemst13IQVCbZXiijgOxO8w8MejqKkZCfgCp1zI/9XxSCUQbBOcnQxxgmIsMe1YVEdC7pDr
q217KXUwshcdWw5waBpyhVtqkFJhXVj0KKEw1iKRaJVghsC3e/XVV/lj3/nHec+7nudH/u2/5qWXXmLoJIIJUq4xxuJEg/WKb/uD38Hv+/bv4ldffBEvFUkTjeblocjFCzpj6PoW7y1Dm+A5o61q9vb2+bqv/Wq+/x/+P5nsH+AjS3WxZD7NWF/c4+Mf/SUms2PK4jpDZPjhH/lpHtx+hTzPmRQZP/Hv/i0/85M/gYw0e3szTh7eZ38yY7PZcPPWEVLVHB5NOHzGc3Z+j6/6wBPsXXs/3g+cn6+4desWVX0K3hLpkjTJabqWopjgnSJJ0uBVZ3oC38uHzOxujdYZeabxqqHuamQmqUzF+b0NKmo541XMoNCyQMUNaW5RSUuRzqirFUIoDmY36fuep249yR/6QydcLBWKOaenjv/3/wIhQtozmVi+4w/CwX6DlA7rB/JkQtutA0KmY9puQxTFpFGGB4ypSeIYOzpOdG2P9RBJT9sY0uSyA9F3nqLMQBgiFTGYDYNXRDrejRlt27Kpx9bWaKEUkKge5+1Y9CmcMSilSUSMVAJjOqQUxGlKXQXeptaBN2aGHkbx4v5sjhOGwRiyPEGiuFicgg8FStOekWUlQydQakB7Q2d6jl9t0S+MqCaCb/p6x/ufWEKSgpfEsWD/aMVf+iun5PmEbjMEoYg2tH2DUoG+MQwDX/qe9/DEU8/xoz/6I3gfuHS96cnLgr7v6dqBvdk+3o6L9r7fCU36od1xa5MkoZxOMKYnz3O6bsA4SZmVGDuOXc4yn+9jugA8xHHM0BuWyyVlWVKtlxTxJNjO9JqzxTkAzz57jDl/FSmDkMJYD0pzfrEkTiPW7YbpdI5xHVUtSJIZeE+aBhqGNRDpAkSPxzKfTfFiStc1TIpjuq7j3v27HB9dRyiPRgfLJTcQ5zFVVbFqGvJsjlaCujPEWhGlGd3QI+3Aqm9RSQw4TBdM1M/Pz9nb2yOKASx9bUiiKWKwaKFwTmKMJE40fWewriMvsvB7H6gwREFgtKlrkJrJZMLFsqbMMhBRCIxQKatND70MhV8vWG16qsqyvFgwDAN1U9H1FklGUeQMQ/AZnc1Lrl8/ZujmDLZmPp8BKfP5Mf3QMZlMLjn4UrI3m7BYLKirjjgqsYMnFwXOD5jekk/y0XopxCkGpFjSdQNt1+PxFGUI7ygnE5RSKDUKVLWiqg1JVuBtyvlpw2QSUdct9+/WRLFkU23YP7jBagVCOtJYIfyAHTqUEpTzoNLfbDru3LlDMWu4WL6GIB3n3XDdBQeAcVHqfUDRpwdIkaBkMFqXyoGzhEAWyOOQ5930HdaGwrssppTlFJBEidwJ/4wxIY4TRmP5iMG1DMayOWtG4VqMSo6Yx4fBBSTWSAVm6IgmlsOZIs0ijm49TdvVDFXDMLTEScr5xYYoCgWrsYooUphIoUROJATX5gmd6Tg9X/JGtzdcUP7yRz8WOBt5TpZlTPM5x/vXgwedEDgf2gKbpqZtW+q6ZmgbmibEYQmpUXHEZm05eXDBs0c3cKZGyUAoFtrSdaH1G8d650EVZRFmEAy9G72VBgSB85WmKR6DiraogN/x16IoWH6E1sFq1yLe8gqvtnelEAgtdoWEc48qfEPLWD6yqhePwcCfh16wbRMH/p61l6jdFqjcejCHYnVbvF4WrVdbw35EFQJSOdof4ZBjS86NhWaI3RK7z4eAniC3yOq2aNomHV2iFaGgCh6D4orIZ3sc4bhBXeHLyhFNGQaLUjEh0UjuMmq3nnYBDdwWzQF9lVIH1JSAMEVZQEiNNWjNrsDz3pJu/QxdyAVXcXyJRQlB5BMYhU+WkEW+89r0YHw78uPMbnFghoFIJwxDx2B6Ao0joFjhexZ4B/fuOAYTcoqNCS0pCCrfLT/Tewde8+sf/xRpcsStJ76OxcUeq9WCoe9o+4brR7fouo7FsuUzH9vwX/yb/46ja0fsH2fcv/2Ag4N5aMcIR5YFCxtnDdYPLJcr4kSTxMFweTIp+fqv/1bAMSnnnK9P2N+bkSYhBOCzn30ZZyryRPOed7yF+H1v5+Bgn+m0YFNt6PuWSEcksca5pymKnLKYjQhfKISkjpjuvxWDZWhvUDUXdE3Ba7cTBjPHe0NZ7LFed+BjyjLBuRDTGGmNlIrlcs3+fJ/arBk6T5ZHCJGBFZSTUHBEMkIOKbbzOCdI4hwlwDQdr5+sKYqbnC41m+ocQcziDPJiwoN7A5PJE8xKR7Vpxrz4bedBkKaK48OBw8MKITxlmbNZr3hqktEPlkmZ4XyYqMo8RkrBpuqwpqEsJwihqOseISVOOLy/zF6GYC4+DMHLLUmDhUR7viZNc4ZhoOsajucR1m89eftRfBImyLquKYoMb4PfYRJJqn6gaSqiOAi4uuacp25N0To8vzU9cRRoQHGsWS5fI04TkixkDBeJ5qmnZyyXC4RY4b1EylB8WevpzAVH2QS9rNitn/DsHTvk0wLnK2aTG+gYluuG9aqlnCri2DFgkSJFJ6GdqZTive95P0pF/PRP/gRdU/Hsm74E6yAtclbVhr3DA9brNZPyBt4E0Y1zjkgpEMF7c5s8EjKMDdssY+Md7WYTFMjWBCs5DRfnK4qy5N69e+ztT5jvlZydndFFLWjL0CvyQvPKSw85Pz3hv/nvvotf/Tt/l21uuPeeWAvSSIYibLDMyxyBp+trlBiQIqZpKpJUgQ9K6yQBb/VOlS6lJC1CYRPrhOlkwmqzxDsFhMQyYztM37NeVeT5lHhc8KskoWo2SCmZbFvPQiC8RUtFVM6ItWIiU7Jsez117B0GT8LFahkU5lnC+aIliTTCS6QMY7C3jiILLU2vLcOgSOOUWRmKqziLyZIIqSLq1jH0gqzM6IaWqulZLRu6fsCZnqpqcDZkSOd74xhPysHBAV7UDIPD9DOMGbCD4OzUIITi3NYkSUTT1JdxlS7mYdSOkcWSTlakaUy13tAPHdYqbt++N1IhAoCSZSG9zliLRqEizbPPvmkn5GrbYOHU1g1tZ7Cm5N6dJX23JstyFucdJycXxDqh7yv29g6QUjOb7QXwK4oDnclLolhzcb5CEDGf7fPyy7c5Op7RDxVpojC0lGWBcQbPwGRSMJ0VIzhT4K0hSxOsrVGqR/gQQyo049htOVnex3lB1w3gBav6HskyC4u03oXc8Tgeu5YChwzeo0WOM2A6Nwa8aKzxdCbQLqJI0zpHLCO8ylBC4pSgGRTWZhg3JcoNZRIxmJ5MhQjPYl9RO0O7qrndNiQ6QmDIsxilNYv159P4vtj2xjmUSmOM5eJiwcOHpztRyha5StMkeG4VOXleMJ8dkkbxyPcQtMOKn/6Zn2MyKSjKjMHUdP2adbVAeM3oF02SBlJuHMdIGbyxjAkO9UIEIcQwBI6FHZWuW58/reOdNYrWmsHUZFmGsT2D6RBjYWcdO0QQYLBdyCC/wg3cZkVfbVNvW8DArvV0uQU+J3wBfp8PljFXuV3h30tB0FVl9xZBexQVBbgsFEVQn+yQNgG7tvvu5Er1SOHsH1GaX14k4TO3bf1HuZtXuYaXyOu
20LzcsygOx+W9H9tq4XiV8ME03vsgmvIe5+TlZxOU+l4EE3prwmC0bUtvB27vLAKFFAJr7JjHPP5rB6S3Ib6SYCeCUvidJyl4JNaEgty54EspxrzywNfcipoCf83amj7UmPzEv9uex+35CbnlQuy+CrwDYwY8fhQXxOC/gjiOkNIQCWj7NKRhTDz37gXe1eJcsjj3CPlW7t4TY8tS7NqiWgerFaX0yP31Y762QKpnxv1X4J8GIXHOjolPb2frxXqJXEusc+D2rpzP0UfUBKXqJbYej5y+MFiG95gg5Wy82uZbNuK42AgtWevsI9egs9mICo8JVyJk3G/pHFKWI9IesW1BBwgdlMywdjJe9gI4DmEIgpEyEQHD+D4xfV+xXGxPSlgc5mnC3syRJiX9UDObJHg/0NYdserpzYY4yliv1zhnme+V9L3h4mJJ1w5keYLWgq7psAxIkeyOrWlX5FlJ2w4kWiBlSu8HYuVpmhaUoMexqTYkcYh6VVqg4xjnHMc3j+nbDmsNcRaKFIdFaoH1Lpjce0NvOuqmp2ka4lRgB8d0skfT1jxx65hNXbHeXJBkJQOee6cPqOoLjo+PkT4N348ewv0xzImiGeVsn/rKyLKu7tP3T+I8vPzqZ7j51BG/8EufwjiDjAymT/BW05sOISKuHT/B0dE1Hjx4wGuv3d5Z/OztX6O3FhVF3D95wBNPhbjEosjo+gY5igC2Y6xzwQJl2yUqsoBW9tZRV2ERuNls0FqyXFdcu37AetUglSVPg2VVlgVj67YObeG6Ocd0BXnpidUT3LxV8k9/7TO8/x1bJ2AokwgiifEgdaCvaK1pfTa2NBuKWYExlsnskLrZUBQxi+WGerUgT1KklKxWK+Z7U/CeYTAwWPbmM9bLDc50aB9GoIP5Xoj6VWpEt2CaZ3Rdx/HxEefn50RRNC4yCqqqxhnH8eHx6M865eTkhG69RGvNNInxdDhvuHVwyNnFncBRjBKqejmOpxFxHOPcgGlb0mTC4tzRtYIsK6kGgtDVRyjhsH1EGucUScTN4yR09/qGLEvwXlBXLUk62QE0XgqqjUBF0PcLuqHfXQcAxvRYOxAlMU0d5ui46Fg1FtlpQtTrQNNWZGmBcwJhJFE+5o7bgTiOWdcdi9cfUBQF3giiNOHV1z9yuRhpO4bBBrpaBIuLjqEP10e16TAdJElO3xlUHFxi7r4ertk4Srm4uCBJUiaTCWaQaCUY+oEkFcjI8eDkDvO9YNae5SVSBpW0lJKzocKMiwxjgsvBJm4Q9KON1IBpA/XAY+n9fdbrdUgByuKg/xjnXGstprG7XHbr/dj1FRgXss2zKGY6nSJkggS0jvBdEE91rceLiHZowUv6tkbpscNqFXGU02HZtN1YgEajl7YErclnBclkwNng09o3LV549q8d8Ua3N1xQChn4h0oHCHWLYO3+swE6XS5XdGYY25UhMUMrxf7+lGlZMgxrbt26zju/7LewWlQYN2CGkH25Xq9p2lWITRIa6EZkMFwE4SK7KrpR4wojFJdKKaJ46wc5tsi1Hlcy5vIkyWB6vCsCg+9QQLOk2PEtpQzCnnCDXCksd0XW1aJSP/J32D5nW7h9IQubLf9SILksaHcFJZcFo+PSpHebh321AEVGl7zGnQWSfeQ5jyOeVx/TOryeK/sOAmcvi0k9+rxd5Whu/2aM2hWHkgjsVa9KML7eqZoZC2g7bFsBbkzksOM5ZYdYaC139ADvLd6NsWhaIMbCRI2F9BZZlj4ILISK2KUXYen7MEEJD0L6MQ+3JU1TIlniR+GDEJ716sXf5GYI5U2YKy6L60umwyWHruvZTWKr9daP8wuTnLfvdxVJv/Kmv8n+iCv78fh7X6l6t58zFpJsS0Jx5ViuvF488prLPz3y8xc5ms+/1v//2cYggy8gIdnSMa5yJnfPHffPWkPfLem7HjO0FEWGxIXEDi0Y+hqlHUNfIVVK13Xcvn3Gwf61kB5RxORFjMegI4d3+SN0lkn5dFDx96+xXFVkecf+/h7L5QrvLQcHB2HS0wlZWnB0dI3PvPBr5GVJEgcxT48lzhI2fYuxPXvzA+oGyjKnqhrK2YQ0imnqDZNZTp6nGAN9Z5nv7dH1FdY7jq5f4/xihe1ARY6Dw2M2mwEdWZI45/y8Ik4t9dqQRT1puzUyDzzd6fSYiyTBiB5lBqbTIz75a3dRkcB4x3JdM5vGzCZHpHqCGRS/8esvslzfJ80kkY4xJmZ/75jPvvhZinxC1wVqFMbQNA2M8Zti9PAFwDqWyyV5Vu6oLWmaIU0QTq7XIe1li0J1XU3TNBgX5oS2HVitF3TtgI48UTTl9u1P0KxL3vnuKZul5nMvfBp8hpTd5XWJYD7dQ/qQRZ+mCZtNQ5pNAEeWZyRZihShdSwSQaw0e9OgVu/bgWFomRQxbbWmnE5Ik4hZcUjbVWRxmOSTJMcjqboKZ9ZYl9GakJQTKU0Zp3TrmlRGDFVHmWq6akW7Xgd/Sx9iBK31QQDmItIoxZnQFSzyGUIoyms3aWuBjmLKJ2/hHLRN4LSfnt1jcu0J1uuahxfnxMozyZMdElwUGXUdolfXqzXGh9Suum6ZTPforeP09JQsjbm4dxedpNRtRRSn4AQ60ngdrGeyLOH09DQsfpJwTzXdgJSaLMvYrJYU+YyqDciz7VpynaCsBK8ZRE0/hJZy024wNnRAZ/PJrtCPYoeXY6crCot1GUsKnSMpmM+DiCjVJV0XYmpNPwASpKPrOo6ul6OoUVPXY3qZgzzNmM5LrAlJQHmeUlVrdCLQ4gAjKgKtb7Tc8Y6mqamqTUCZdfDlXC7XSAxJEubidbdCOA2xpmpaojhmVdU4txnrlwEtFc619GP60XbBNYygysXmNllcEj2M0DoEOaRpSp7nu9SpSAfnCLyi7RqSJArgm5F0XUQiNYM15HkJKEzb7uonrxR+FFA26544TrHeUzXdGx6x33BBuTVYhqsI1qXSeFsAxIkmSTOkZESYDE3XcHpqiHTKtDzilVde4+7FkjQuiJOE+WxCMSm5desWk8mb6VvP3/+7/xdoGuI4ReuQNJBlGcKHVg8ERCyE2Jsx3svu2icB2QqTStu2IBxKi9G2I/BbjAmClG37eotKbtu7OI+WCqG2nMVHJ7arKmLntr9vn/d4NvY2fuxRK53t+2j5qGjnqqAFQkEpAnw2PkESPMm2Jcpl73pbKF7lbG633c+h2guIjwc7Cmm2z7HWsu3ibzmZ28iwHQd1uFRJIwzOBxP6qylB2+9RiQhvAC/H50sQQfgQRRJjepIovfK9DURKI3BjrKXaLQ62CF5QfjsGMzBJS3oTJkkvLVIYwKOEx9oh3HSZpa7XwXerH4jimKKIkdIRi9eJlMPhiCJHFGkGwyPV0yMF1u5EbX/fnhdx5Zl++3+2VZt/pMC7/HlnDP3I+XqskLrytpdF56PPuyxEH9/Tx4uyy8f9Fpq9gg5u4dr/WGG426VHa9HL51+pOMVY+F7dk911zlVBDbvfHzmeK+//uFPA5e+CEb7n+jXF009p2nYgz2OapmHoJVLCfL
4X1KK2IkuCXYwcFLmeoeMYhsB17PuWtm1IJxlCKborg6s3G2Ji5Ig4OiRnqwV7+3s0m3qcbFoGO5CQ8/LtV8h1jBrCfdvUNUkUsb44p5xM8AM8PL3PcrmkamriOGZvfkAcRUgJfVuTZBMm5ZTFcoUQgt6EdvnZ6/dZryrSNKecpEit6G1LNzg21RIdKerWEOVTmmFg01kSEYSAxjg+9qsLRL5C49HRMffu9Ny7d5+iPGZSTinTiFhpOtNx797dcVEfsb93BDjW9Ya9oylmVBuXs33M0HF+esYTN24ihKAzobOEC2NN3/cjT6xH65YkzsYFXrYbQ5w3LFcXIaI0S/FtS5ZldMMKoSWRS5BCMZvlNF3NL/7iL3Ht+iGTIkPqiNlexId/7lc53L+BEK+M1zgYqdi0A7iQzd33A04lJMrgnKTatDghWW/uMQxB8ZpkE/bnJXVbE8cZSRr4e1VVIWXMbDrj9PSUobccXTvGW6irjm7o2Z/uB4P4ROPHDkvXDag8Yr2qmM5Kehk4jPN8nyeuPUnbBiudIi2IophiMsPYQHkYTMfQ1SRxSde06GhOXwjSPCKIBQ16XMRPn3wWrTUHM8+bn32avh9ou2EEO4JaeC/LmU6nVIPFmJY0i1hXm9BtcZ5pYciLmGn+TBDgGIlxjk17gbGCs7OGITH01vDUtVtAiJV98PAEIQSr1YooickP3krTdAjbI7wmS8fELKlRMiIbCz7lBYlQKCfJsgKl44AoUgSHBtMSJ8H3c2hDPOpqsSSOetJMMc0Luq5GS4tHItTAwVHGZhmhZDjnfpxvJ2Xo2KzXFcINYFOqqiFLS7xRgVtra6rNQzrTBs2G1rQX7ciHbrDOkRcZXWPwLnhWl9k8ZH0r8N4gpUP6jiz2GNeQRhFSRtR1w7QIxvJe5aSBMAsE9whtJSoKxX9rDNZ4hO3Iihjja06XZ0ynJc5bquoBeVYG2losQSf0VoDXbNYtZRohhGJTLVkul7sO8+HBMXUdkF6tYgZrAlKqY4YvkEL3xbY3XFA6G1qqV1Epz2WLNM9DNR04iMNYOIRWuYoVWaRYbdpA0i0zptdmVJsNvek5O2t5/f7dgGomCtOFjOFItwghaeqGJIkDdy0KF2cSZ9R1fZlQI0JrXCp12U4TCiVEyE1WCq0Ug7nMs4bgMSmlxLph915XuTbbgumR1vdjkxkEsfLVAnPXZnZXJuVty3krwLli1h28xq6+95jw8gg3MyhD8RK/VTerbWqJG43f5WXbdNxFOyqlt+dKCIH0wcR214Z0miv0TZxXeBVeo1RApLdiHCEDBcFBUIYSVs/WWrABMTTWsBXzeOFxdqsCV6FljSONgmJNeEeiPVJajOlGjpJnGNZ0XUOWpTRtzd5sThJJ1uslWkuKLCVNY1arGskSTItUDqk8kRpRZuHofc/QxgEtpyWOgEgCFZFIWC8rnnpKcuNmxL17DQjP4ZGh7y+Lu614CL/7Ksfz464gfOxa9aGYDzm51lqkUEGJP1pIeR9a+BBa25eF2bYQF7tzsa2ztm320MIARCjerQ1o6yWCOCaFXEVPr3iuBkT4Mrt+uxjZOhl4Zy9Rz92+ud21ub0OwkRw+bnb+98TFPt+tKcJDgFbN4MrtA3C9S2EDNc+W4/FLYq0XfdcFtshnStwiLeowva8uB0iL7lxveC//LM5w9Awme7jGfAW9g8OOF+eo9MCYTx+gE3ToSPP8c2bgS8GpJOEzWodWlUCbKOYzVNSlV65HQeaqma/nGNsgxQZuWjRgycTGm09aZzidIwSPrT+jOP84oI4TXb2MFJo2qYhThKEEKj5NHj4ZQVaRwz9gPOWspwgRcJyuWQyKTk7uwhix02N85bZpKAsSwQRq4vzMM74URjpBF3VY2JHGmVE2uysw5yz3HntFb76ma/i07/+YaJ4n/XmlPsnt5nMbrA4v8DT06xNUMVKh4ockZJ0nUHKGGM88/kcocfsb+eYlSXrxZL06Tdx794DDg5m9GbMZ+5NuD+l3BWPQnqUDIjgZh2ESMYYLi7OiOKUsizJspR6E9I9vBMsLipu3LzGyel9miZ4Cx5fm9NWA/fuv86t6zd59aWadx5m430brvh127OoNnTdgFae9XqJ0jltfYKSCZFOKbqUtlvTd01oPa4vGOz1wEUdNkwmU7ou5GrXVcPFugrXJDF3TlZsNhuO9g9wQrOqW65dfyYUelqPXskdXdeRz6dY6cn3p/QmoRWedVXjTVhU111LKmLO7p9g+45rRzdo2mVoqZ6uiXSYL/Ga8wdnKBmRJAlCKzrTk6YZTd8TxxHrZs3F+ZIbN54Yi6glWZEG6kFXkcQRSRTQy2lW4tMcYwxlkofMcTMwyW/R9RVtt6aYP01VL4j2FJBRlIG6kechWvb5Nz+L84bzxYIkSVBOEiUxZ2cX9L1hs64QOiLSCZtNzeAk2fw4RBkW+yAV1oS5x/SGVHQkcYI0lvk0AzKiaJ+mqTmahTb9Zt1QFvukUuC8oe9i4jwjpuVglo9iuZBUpiTEUUoURVw/PGCwK5RMOb4m6VpLb1xYiBpFOcmhadFWUhYlfZYFmsaYKCaEoMwc/dDSdR3etaRxhsNjXUOWa7RXOwAuioI1Yj7Zo+8MWI+VhiQJNJWu65CjY4PpLJGMiZJQnAaBqsGZEKPZtT7w1zuBM93O9aUWNd570jQmygwbYyiykm5wFPsR3oS6Jy569o7mtIs1XdfhnGFTnzOfHHFyesEb3d5wQRkn2yLL7NCvgDT5wO0aLWeUkEjvEFLjvcUpgXAOKQqGfo2Uoz2MkWg5IUkkAkecJjgXQuY7aYHAs9Haj2jigBDp5WQ4TqaBCxBeC2LXDrfWgjcoGYVc1YszkrjA+hBrpaLA4dBRhJIRkVDj5HyJ+m35e87ZHXq33a4WGrvCDggCnK1oJhSU21b2I4bgXBqjb8Ux279toe4weV8+L4hutjxIsTsPoIJwwHmk8Og0tK+tD6/dmggDCH9Z8F4q0kORbQazU5hteRzeixEF7nfcn633Wjz6sfV9jx0CeqkiRZqHPNBAxHYMQ8tsGs5dliU0VYWQjixPRj7OgI7H860gzVKapiaSBuPWtMYxnRzStAuM7dms1hwc7LFad9SN4uThfYpJSlVVCKGIdEIchUEq0TFd0yHzngenq52oIo5StE5Yb0IBEbmM3/8Hjrl7Z4MTMAx98PHD03UNSsF0Oqeuu6B67bowGORBvd52TSCXb9bMpnOapmWxXDPfmyGVI9Mz+qFFak+kCuruFK0S0iSnayweR993QfCkJLFKkFJjhoEsTRhMR9dvyPMJQ+/GwnpBVQXXhcPDY6aTPawTNG1FOUnpu4q+HZhN5mzaFbFO8S7wjnCGth3AQjmZYvFBBYrH2h7nDWaAKEmRymPagbyMsTbEtUVKIohI4hLnBySefsyDLvISZ4PaXuvgaVlVFXlegBdoHa6bKBWcnS/QUYZzlqpuaIeWqlqRZDl9N4QFpqlJRLASiSLBfHaMtT2taRl6yXJ9H00covAShfM9Tz+d0rpz2qGl95IsLVhuLuidQ
2rJ/bN7mAEcDWZwtPWGBydnpHkoEso8DvyupiHLCnpfc/eFu6TxpYVGWc6ZFgXL9QVpnJIUnjg+YHFxgdAaYoUVjmEwKOEZvEXGmsPrx5h+2LkZ3Lh+naapQgcgE8CEvblH6mR0rIjI85Sub+mrlsP5FOcN+Y1rdF3PrWvHLBZnLJanWNPRthfMpnPOz885OMwwxtHUA889+wRVPdA3gfNstlQWrXjh117gd6gbvO9dX4OTF3zk519jaCLUXkttG7KoIJ5GoHq8i2krgwO6YTXafTkO9p9hubwgilVQN8cJkdJUVUOgF0VUVWhh996QJQmofseVNsZQtUvi+RFPPfUUt2/fJkkinFMgoa5rHCq4XXjJfG9Clk6JdGgBSuF5xzvfxoP7p6RpGMuq2rLa1OztT6Dyu/GubmseXNwf3TMMWZFycnKfYjqjtwNOO6zt8E6CypmUBUp7Vt0FUmiKvRnny4cBaW2bILxZrymzGfP9A7p6oLEDq241CpDgdHmBtKGgSNOUTV3hhKNuKxyhvXr33gat4Ohghrc9aRS8G6v1BWUxpfYtr588oJgKlus1aVIwSEPfVGitSYuMofN0g2FTBWRX6YS+N6w2CyZZzhM3bqFVuPakkwjnaasaNxgiHaypzhfr4Amda5wdEAi62hLPJBfVa2givFPBhSOdUSYl1tXYIaIbHN2iCegzfYjWdaFDNViDbSyFiinLlFtHR3R94AcfzQ4Y3JQkCdnRD8/OSZMMIQR1XQMSrRzGeQ7KOXGWM5/PWS4vmKQxw9AxLyaIY4UZVCjaYkM3GNpu7H9YR5RGTLIpg+lHV40evMMYS54f4Rno+gVdFxJ7ersCkbK46FGxJksT+t7uOppaB+57FCusAS1TjvYPaNsG54O6PhYxiS5QLsQ3p2nw5Y3jFG8dSgSxVtMJYh0hEQgTFul5WYRFGuzs0bTW1E2DHvlXQxu+51hkmLZF6DHyuYdhMLiuJs0UprGs2jVFPqNb90RaM/SWF37jFfam+xSzEGKhdcrN6/t0XcdT12680TLxjReUkdiqdNUO/VIynDStYryQoAJC49SY8ypCdKBWGpUp5MhjiLJ8TKLYtsw1kfB4r9EywmuD8DogYXbMEhYh2mrrCWmGUFBuiy8/htTDJWqEiEBovBx437vez42nDrh3b4kxA6vNBV3rqOoNTVUzGIuSl9YyWgc0NtGBRyP96LHot6aqAj/aGSmCbc3jrewoUgyuJ0okzgdrkGBc7JEuQooE5zqieMD5CR6LVoGoHEcZpvdo2eNpUTJn62YsxNi6Fwo3OJTQ9KJHSoXDYwaL6S1KgMSh4+C6b114jjUKhcf6mixLgtCpqZikGlJNFEU07QLvwwWP6FGxJFItFxdn5GVCFCWs16d4HEUW03RnJGlE29Y0bbBU6PqOw719Ot3RdxXGWfpBIkWE8Iqm0Zyd3SGKJWYVVPuRzrm7umBd3aMsZgiVYV2DqPowoThHkiT0Zol1nqZy7B8cEcWCPM2C+EoI6rpCKsPDxQO0lkz8MUkUk6SKwRg21QrvFGmeIZWkaitAcvOJmMkkZOxmeULfjWijT9CRRSnNelURpROiKKLrB7KJ5uy+ZTaboOU+XesoskOc7wJKS8Kmq9E6FAJ1veFmEZSGXdcxKffYbCqm0+NROdyPTgUa23suFifcevIJIjkP/DFbEUc5R8cz4jhD65TX75xxeDCnH2omRURRlHizh7eGYiJZLnOStKRulujI8OCB5+3P71NVG5RIOF+c8pa3X8f5HjNAngVl46ZakET7DGaDlIEg3vULpEjxUrK4uCCOW7JsnyTOcc5gbYuUmjQrkFJycnLCc88/wWKxpK4rVJZT5HtsqjPm18DZDmc9cRk4rp/73DnPPDOj74NNVZToQFtxCWkW49wC23ryBPTckhUTehOixJIoJc1icBmrumZoHVHiGLygMRJTNxgD3g9jDGaEjGpmhyWRmpOmQT18etFyfG0PpT2L9SYUeypnenC4GxO9F6wHh5rMGFzGw02LNRc0TcN0WnK+bpEymHKbPqAKVb0AH1EUBaofmEwmvPT6Gfv7+6z6hkmWg9dju9+xWi2Jo4K7d045Otrn9MESna5QGpq6RYqUqnUslmcUuaLqejwKhGI620fJEmLB4SShcwaDQ2YtfX9JLhFScnxzyj/6R/8b3/YHfhsHx/u88MrH2D84wNmKbtNSmxa8BmURTgdkRIfFZi8NQqbsHe7zyp1XiZIIvCLSKdZYNqszDg4OOD95QFGWDG0X7h0Z0jnwkq4NtkSxyrn3+kP+zb/8dxwdHY2JYp75fBra+GaKcRU6Szk975EeTi5e5OjogEwkLC7ujx2AjHYYOFst6UxCUoDfXBI9Vk1NZQ65c+cOh8cHrE9OSdOU5cNT2q6mnBbcPzlFqoQkSoiiiCjWPHE4Z3Fxhn94TppERJGibZtQUB8cce9sgUkCoPH66Qnnm5Tj4+v0fU+1qkjiCTqCKSaM83GCc5p+GIhjhx9WtK1n9sRNvM1CS9d5pkcHnJw+JFIx5V4BONANMlEsFoEO5j2cLU5J04wiDwI6r2POmjb4ZBYT7p6eUL/ecvPmnEgIiuk+m80DhIBiOqHvK9IsKOfruqbvBto28Om8MLQbix0cq+o8oOPVmumsROLJ8yl103Mwm2C6niKdjYJZRTdYlsslAz1ZUdB2DVkW4/zAfG/KdDplsV6hSFCRJoo9c5eTx/shBjN/CEBTdwxWE2c1TddQ1WC4IIr3caqibwac8zRNF/ivTUCfy1mJsQPNWtEKx9HxBCk1OrJsNjX9sEKrmIsHnybREXGcUOYR0xEF7U3PMJM0vWdxUZGXOUpJ6sqDFfTDgGk05Swsniu3wTmIdcosjUdKl8D1gmgEl4RXNHVDnGq0VqRJSRon7O/PabuKEN0bdCBt2wadiI4wdkB4yOKMST4LjghDN3JhgxtJ3WxYL5aYoSeRkoO9A5I0RU8USqYBgCvAWU2aKPpJFZTjRBhnENYjrEFxCWq9kU08zgv8Ytvf/nv/1EvJmLns2BqJuq2hqH3Uw/BqsTcMA7rMOHvtDj/3kz/Nh77990Ecjyv3gDKqnXpb4S38H//6X0WqBwxdGHS6YWA2C15527ZB27Y4exVNC8fivEFrRdf1RDphsdjw3X/qL/CBD34pdSOY78+YTHMEMUJ44iSYcW9Rp4uLC+pNtSOFd10fBCd4kEEdFdoWo8BjPN5hCPYSSimMGdvaItAFtFIYW4f9RI6cy9DmtgYEFiHtiGiKIEoSYeUiUEH12fVjMRlSG4RWSBEypadJho5CuzuJYtI8p+k6HCEqbW+a0Pc1nh6pYDbdY7VcImSP9TXLZUM5DQKnrnX0Q0WcKNqmI8sKEEMowKsGpTzlJAn5oknJMHi03PqAiZEb5ciyIiCCzlLVFyHfOC+Zzw5p2m6HcBvTY/qBvu9JoqCylMpTVRXT6SHeK6yrd9/zFsXd8naTJGG1DFFiFxcXlGUJPqDNOpLEOqLuWibTkvPzkBCR5yUSNfJCHQfzaywXNU6saBrPfL6P9w1t
jjxOEWQajsZuq5Fb6v6PUGLJbpmqnpMBjGzGdzoh5kU8nVe+3A9+CDr2a+OqWqHPojgXZa7n/4QV76ipdzdHCbrjNEvaH1j64OuPaibUJvwCw55PL5h5ie5khptw+r1Yp77nkzbSNwnQ2cYMHBwR5XrjyMbkMuX91juaoo6iW92OHS+UeQUhKHJXluB1vlFgxHOyT5EVLC1ugSxhSEzhbT+Q3SbAvlafLyDFf18L0RUmqmZ8f4wQZ1MyTwJmxONhhGiuUypSobAr9msZqCjhhE2xSZTf5vbO3iueCqIUE0pmlXGKGZJzMqkzE9mjPPSra2Nsh0w9HBIXEUIkVGMS1w5Iy68ojimijKqKqW7Y1zbGwO2N2e4Pmf85j4uQ+UMlUMJwPKtCI0MatZRVLUxANFmXXURUBbaARDhB8hZMfGRo+uaxE4+E6M5yuGoy2KtENSEcaSeDim03B2MrUDUFvRdc065u7SiwLSJCUIXQ6Pp+tOUkXYi2k6TZHXZHnNpz/zGRwXpOPSmI5gHQwJopCsyGmlouoKkrxAOIog9KibhjzP0MZQlEuUcvGDHk2nqdrCImAKTZLkQEtVNQRhTNsWlqvZrHBd13KitIcxObIwtEajdYtyDJ0pqOqSyI9puxaET9s6FIn1dQo0iBZXeZRVi+M4OMqQVwld1ayDDgaEojW2EQBhSNMVQRAglUFJj7KwSbi2q8E01FW+9rIJVssUiSTPS8riFMe1aTcpwvXVqEORVHieA1rR1BJEtV5RekBN4NeURcOot4XrWo9lmiyJex5SVSSpJgqHzGdL+qOQNM1oSmO9QNMZUrgkpiOMXHTT0nUdy3X1ZLLM6HRrO1HX7MKyLEFadIPpGpQjUEqQZwVCuCzmGUoJdNfxxJO30G1E1dTrPly7OvV9Hy/ss5gtoc3wfBcDVHVtB3JtCKKQqm5AeGxsBESxg5AtV++9yKDX5/DwGD+cUJeWr7ZcnTKbnzIZ71CkGWVesVrm9AchnudiuhapDLpxaGufrmvZ2RlQ1w1tC2mqiaM+6WpFWxvCyMLPvVGPNLHIitXBCoN6YZV3d29ob8vlkh/7l/87v/X+3wDg3e/+QnZ2dvmRH/lhm6w2UFYlP/zD/4h/8A9+iH//7/8jv/mb7+PwcJ9eb8ClS5d505vezA/8wN/nU596lPXmGbDwcrMO79x9bNarZ2m9b7/4Sz/Pa1/7Or7u676eBx54gE9+8uNUVc2VK9e4dOki3//9f/tPrSBBSeeuiimlpF3jj+54JY026Lbmm77pL/Kud30B3/s9f5PPfPbTVqVcB3SMxg5qa5/lB37rt/jSL/0y/tyf+2I2Nzf5xCc+ge/7vO1tb+fatWv83M/9HIvlgq7rWCwTfuWXf5mv+Mqv5Ed/9F/w/vf/JmmS8tDDD/HWt76Ng4MDfvM33nfXe3fPtXv4x//0h/ngBz/Iv/5X/3JtJxB84Lc/wOvf8HoAvvALv4DBYMBnPvMZvuqrvoov+7IvY29vj/f/1m9htMZxPG7evAnA5qZFSuV5wmA8oixz9vf3OX/uMv1+TJrlNLrDmJaqcjg4OOAdb30b8/l8XQ9nPw+z+ZSDgwNe9MjDhGFIHNtV26DX5/qTj5PnOZuORDmCZLokYkDoerhC8olP/AmPf/pTfN67v4CyOUPLPq3wGV14kMev/z5iJXjkkZcAAs9XLBcpXhxz+MwBz510vPTzXok7uoDsamQQgyhID5/grBUM1S79y5dYLqd0MsJ1+xSL56nCBjkacdztoTY2qKqGzng0dcaVOKRyBMdVxdlxyvmthxhuvo7T5DqJe4I0fXpen9nx8yj3CvFGnyJP2ezHSCdAssvIPSPuVZRFi3JaQrbZPXeV+WyJocUPR8S9ITduPW5JIeE2WzsvQZ4tmafPoYRmEAyQwiEKYpRzQnqUEPlj8lVDULtIal7+wBUOD06JQoMR29xz7TyrzSN6Yc+ukMvnkE8e47khWnfsbA3gvGBrawNJj0ZnXL58Dwd7CU3XUdYLXnT/ZVbLgrLqqDvBRriNVC66E7QtBL7DKpkS9a2aa+ioK2PbYAJv3bDSUNSlpSeoliB0WK0WhAHEvYgut+ze0WhMmqZgGqraMB5vsZgVtFjlU6AQyiBVR5MLtrfOcXR4Stjz8aVCmwZEhxLpOpXs0dQ58/mcXq9HXqzQnSQIFGU9w7SKONgA4TBfnBD1XPLCKnZl1+AHFk9zOjtisVqxublBUS2YzmqCwFYga9Oy2TlkWUIYhtx89Ib1cs5zLlw8x2qxxA9uEfoTqmqfsj3DcQImo3P88R8/zmSySS8MaFtbVahPlyjP9sqXOQSbLT1/h3ylGA17CGGrL3v+Dqv5CmTGfPEMvd6IQb/HjeeeoB+fI3BGNMUZG6NLtHplX3vrE/c8Dg4yNrdcikwjaYjXr+X4YM7Odo+bh58h8F3Ob1/gbH7E1njEyfEMZSST8Rbu5iWCkW/P02VNK2t2dq7i+Se4fkNTK3a2XRANVdkQxyPKsmJ6tqDf9xEiZ7FYMRhFdG1Hp21HexC6FOWCvYMZk9El5EDRFGCky2Q4om0EpsspMg29DQYTB20qNH0uX3mAs9n+//UDpR/1efyJA4JQrbEXgqZZsDM6z0plVHFq2UnKkuN93wXR4QYBeabRnaFpSp6/9QSDfsB40iMrDHUNfhjQH/sc7B+uk1qCoswQ0rBYnnD+3EWkAjeQnJ2dkWUp/X4fIV1u79+krjuisGfB3bQWpK2XFHlJYzQ4iqxs0Urg9q26tLqjzLiKvMnW9UQOJ/NTy8AKHWaHU1wvoChrMN26O9Wn0wWBP8B11Dr93VJXFjAqpYOQ3Xo9aFeqCN/6PtuW4SgmTXL6wwiJoGk62rZFSk0QxUjHo9EVQhmrcho7SBVZAUbh+y4GjafsYJanGZ4bIlTFMkmpq24N5bVA97Y0dI2DVraD1HMjOt2QpS26zdEa0rxBGlgsKsJggFN11G3GxmSTNCnwfImnHDxPsEpm1JXB90KiOCDNVihleYlFUeAHLloX1FVG4A/puoYgVGAUvd6QZbKwQYawT5FXCCCKbAe3hdh3lFVGvz9cNwh1NE2NwKcoC3yvT1vWFGWJ50b4vkvTgu7sh6ftCrSwfbVd06AVuD0XU2pc3yfLMhxHkmQ5XhjQdZo8Twk9H6kszBVjODtJcWVgB/yeg+g6PF/Tjy9S1y29fsC0g8Dt0Yt8ytpg2oYoGq6bnGqiUNDvT9AdeFLRH8QWPNzCqD8G2WC6jrbqcJVNEEtpwzKY/O5QZrTGyBcUyn/3f/xbXvzil/ClX/rljEZj9vf3+Cf/5Af53d/7nburYmMEH//Ex/mO7/grfPV7vpZ3vvNdDIcj0jTh4OCA//bffpZnnr1u07C6vftYXafvrtn12gN3Z/jrOo2Ugrpu+Fvf9z189Ve/h7e+5e184zf+Jeq6Zv9gj996/29QN9XaA3nnPltb54igbhqLAIL1sKoRUliV8u70amDNn8QYhBR0urWKpbHr2DRb8h3f8e18wzf833nta1/HK1/5Krqu5datW/zYj/0Lf
u3Xfs3iToRdwf74j/84t2/f5gu+8Av56q9+D67rMp1Oed/7fp2f/umfvgsP79oWIW060/M89g8O2dnZtaqrkPzwP/tnAHzFl385b3j9G3jNa17Ncrnkt3/nt/mFX/glqrIEAXXdMD095uLFy7hOwOnZAZ5rmXDpasZyOefq1Wt2++EaKBVxT3F4+xApJefPnyfLbGVe13X0ej2uX7+O7/tsb2+TJSld11FVFUVRMJ8trY9rY2wpF1LajuWqIqtqVvMVm1cusznaZL44xvU9TKsQGj7zmY9x/txVFpdrlFeh1JgwDKmqgvnimPHwEhuTC5StDVwVVcZosMHsbJ/pdMq9928gPMlWP6Tph6QLTVPkLOZLPvNki/RC2rbCEQo3bGkXkqa5xRNPz+jUq3BDl6QoSfdqzl99EYvbJZcv+4h2j5H/MK1w0Soj2LpCujhDtxotFgRhQ+iOePjK/TTYkoW8qgkmCqlckjIlz1Z0tUIqTVIccTo7RGD9vJ4akeQJm1s1dA2qCxkPrYL/8IvuZ//waQb9bYoi5a1veAt+qFnMYed8wPFxye7Wi9BaM/AV8YdXCBSu5/PII/dy9f5t9vZvotyacTSk1RD1AzxfEsdbXH/mWTx/yHjrHKfzGWWa0LY1RZrR6ZzRKMI4DSenQAd1m1C7M5raBul8zxBFA3xPI5RH4HkU1YLd3fM0TcdquaLXs6+PDgI3wPEEulNEcYzAIQgtYk9KSdXYY3Jj4xyDoY8Q22xtbXBwcEDbRmRZwnB0Dh0saRtrZQqC4RreXeG5IY6jSBOBEi1VvaJtBIEXUWpN26b4oRWDmkaSVXO0aBmO+yjXYbIxYnFWMxx5FEVNUdYUTYHwNMrr2LmwSZ6UjMdjwkCQCIXrxFR1wmKZsn1uQpKecTpd0OuPWSwzZtMFjmrQbUfcH2Bkhzx1uHjhCovZCt93KfMZY1ch8MiyI0LnPJuTDc5mOed3B1S5j6cnDIMI31G0bYFC4bod89MZ6I4wjJiezhmMfJqyYtCLSJIpcRBTVRVbm5KiWnDp8gWrHq86zm/fi+O1CBNZoLuuyauGbGF/h6vbM5YnhzgMiGMfFXQ47og6L7hwcYsiMxRFxfndc6xWFnK+XC4JYomhpmlbXNdlc/Mis9mMOO6RFwnohsgPiUYevneOpsq5/95LGCPIs4quEcT9jlVSoITCESec2/zcFUrxp5WE/1+373zvu83hrZLzF6yZc7Va4PkCh5iiyJgn1iPQ78eUTclkY0gYuUwmE4q84cYzz9Ef91isppy7GOM7MYtFTSdS8rLBwWe1SjHCpak7u6YO7VWYQuH41huVZfkahKxQSrGYp7iuXWFKpQnjkCRJ0J2h7uxwqbW21W1FgZRYor2yXoeqKiirFN8LEdKsB6OStoEgiCjLnI2NMWmaU9clYTRAyI6mNnTaeiyF6Oww7QowLk1TrD9ohv83bX8Wa1u2p/lBv9HMfq5+N2efc+KcaG4X92ZblQ3OchbVuQxIVQZLpAQvFi6somgsAxYIAzJIIPHAMy+IB16QQVgggYHCLlN2FdVmZebNm3n7uBFxut2vbvZzjoaHseLckuChLCVbCimkiDix19p7jfkf3//7fl+ShmRWO/QUaULT1uGNF/5UsxaMuzqSWKtRcUQ3Hk6vMQ7K5tgiJPTdRJwE/pXg9EFOAuqotyMSDV4ydA15EXE87knTUJ/mpCWJU6TUVMcGj0UKHQYHPxInEXYKSVkdycAQ84Y8T2jqUHGW5ZIkDnWAWmuq+kikY/AxIgqBISkFbRfUU0HEMHbEETivgzcjjomjNLAS0xTrpvD3ennymFRYN5BmUYAJZwlt29INNqihIgfC0Nw2E1EMQmjcVDONNgzxSuEItgAhBNZ4pjGol+KkeE3TRCQV0xjUUs8Y+G6DpchKpmlg6FuEAB1Jrs4uqKuJKAkJ5mnyDH2NZyTVBWmaoLVmGAxm8OgI4kSEC4fV4XdpqFE6KH1t25KmeQhZNRNxooPiLOGT2vPf/G5zuql7/tP2mkn/U3BZwamiUJyGP3lqxglKmv+q99sH6VHI4D396rVba98rkSDeq5MQ6hm11uE/dyd0jv/KfymwwSke3uMTGugr3qM9VTsGbyc/z0eIEKewzuFPPjN34soE9TEA06U8+Ta/Wodbizol0J0NjTlfhYa++jOdC2t670LN6vtMh/j56w+rfAmo4MH9ygfow/etlMIah3WG5XJNHKcgPLv9jtvrG77+9W+ACN//P+0pFchTM1Zo4YIQLhJS8G/92/9z/t1/53/DX/hz/wLf/Pav8rB7R4Qmm+f88Ps/4u/93b/NX/mrv8Pzl0952N0hfM7zZxv+1r//H/LTH/2Yv/bX/hoPu/370FISx/zN/9u/hzGGf+lf/i8AcKiOuBPb8D/4m/9Prt+94a/8S3+V2WzG/nhExwmLxYIf/PH3+Yd/7+/zZ//Mb/ELv/rrbHcVUZlQFBnXP7vhJz/4D/nOL/w6o1ySZxPWxEjpuX79hrq+4erJhyw2FzgvScpQa9sfR27f/IAnT0sO+5KkkNTNIz97dcOqvOAn3/8ev/ALKz77fMfbrSWZF9hhRGaS7mFkGX1GFPWkq38Rn8cs04gsjSlnS/7o7/4HlLOIb377iqF9zdX5BXqMeP3qLU82T9lXXzC5Ca1nLDdrjlXLcrUhLgT1bkDoitfXtxTzgsGORPESpzzf//73mc0KZgvB3fUji/Ipx2PF1dUChCVPniDimqYaefnyJVEUgmdxZkj0itDvrqi7PaNriHTGOEHx09f8i//OK5yFJE343/2lDdUnV6FD2guiJKftW9puS5okqEjRtSNVM6F0wu3dA0aENXiaB8Zj0zQsZxckKqfvx1O6O2F/bKnqLdMIRR5W9bc3j6zmC+aLDMGIMwVtXzFMNc6EVhvvG/q+J840XTuw3syJY4e3MVEU0Y33XF1+SJ7N8GLkeAhUga/CcsPQ40zC/cNblEqYz3Nub8PKNUkUh324RA+jx8sjaRyT6hXHQ0+WJ1g67u7fsFxc4FGMrqKcL5DC0R4Hri7WKJ9y9cE5r1+/xUvFclVyOD6wXi5wJviknz65ou1qrBEslgUP91uOdcVyHXF9d8/5+kO8NBz2DUVWsH24paoa5rMVgR8Z0fVHnj/9hOVihvUH+g42q2fU7U+4Ov8Gi0XB8TCQ5ZKuGZHK4Z2mmFnMFPB5g2koizld151mkeBFffP5gQ8++Ajn6vcp8S9ff4HWOctVwa4+Mss3oeZQhKDOob4HObI/DJyff0A/dESiDKnrruds/RzPyN3dA7MyYZj24BX4hIuLc+q65vz8gurYhfrMk6AyTpYnVxcM/cR8Pme3fySNCp49e4HzDbdvGp4+X3PYPyBQZFlJPHlE1FMdLE8uLtFRaOn77d/+H/9/J/L+f3z9M4+ev/DxSz65lMR5xGgrsvxj4rTkeNyTZCn76khVHRjHnnFUFEXG2fkV0zSRFvArv/aLHKot67MFQhpubm7ouo4XHz7D3jlkNOKbHk/wkUTZjMGAMZ5usoixRRDRtSPz+ZIo
yk80ip6qbvAWJIK+75nNZtRVQyQVwzAwDAOzxTyoWVO41Ss1Mk59GOZUjBChiL0sC6QM/MBpMuR5yTgavIDZYomWgsOhJ0kFwkQksTx55ka0EFjjwhpawzQ5uq5DZBJvW/rB0XdD+DP7ibwIK9E0Vjjn6IcOr8JwmaYpbpLYaaKuW7Jc431YfWVZaAwILSeCoZ9IshRnLNZNzOYp0zTx8uVL9vvQZx1nJeBCcCeLGUdDFMWMg8PZiGEaiZI0tASMHVXTMZunHJodWTZHq4S6rZmsxFmFlJbJOYwJfbRmgKKMOFbHUzd2SlGkTGbAOf1+gEVq+mlERprBDGjJiQ02UbcVcRJahKZpIssL+m4ANLNZjrE9XV8TJ2EYSrOCKHYMvUf4nOW8DEncLGYYW9pDRZIkGOPerwf7LrQWzGazgLsB8ixDJ6fgkwid3FrHlGVBkS/Z7490LRjbM9WhtqzptmiVkkULvAsd1m17ug0XBYIwoFvT03Rb6hNTTEgZlNwoxhlLpDKiWfq+KjFJc9rmMSB5fOh8Bd4Pa199+ffJW4mQYdByzvNzIVOc0tdBcVFChqzJKZgTLhIOJSXeuffDkpCB5xfCLfqUJg+r8KCkB7+k8w4pAmTceYc6hTQgnGdCKjiByM0UeoqFFCB//lrC++ZPQ2vwSn6lbPoTbiYorgodfdWYI3BeYKZgDwkp32AJsadh+H0q+PS+WOtQUiBlWKE7GxiIxhmE14yDIcty5stLJjPRDz1pmtA1LXEcVJxhHN+Db776mqYB7yxxkqOkYrL29E8Fj/c3eDexXC5PDx2BGQzW9Nzfbbm4uKAoU/a7KgTp+o7D4cD9/T3Pnj3De89+v2e+WKAjSdNU1HXNh598TFmW3N7ekkQxRMECsds/cnZ2xqxc0PYjcVLQDy3eW5rqwLOnF1w8OQ+NIzqhaY+Us4Tjfs+zpxdM00hHj7eOKBJ0bcs0GcpyHs4/BoTIqeqGXGnawyO3DzuMspRxhnCC//g//jv88p/6LYa2JY2TAIpOHFHaMHYTcezxJiWOjvzSL/zzvHmz5bHtSWNDOyRoHdHUB67vfkTcx/zV7/x1/u7f+T/xo3/4D3ix+iYvPzhnvoLO5VyuZhRFyfXtI8uVYL5q+eJnt6xmC5bzJe++fOSDs0t+9JMfc3HxIaNt+HN/6hc5bgeePT+j2tQcqx2zF8/pWsfVsyXWjTTTgC/nrPIIbyfONs/AS6yBff0Zb25aFsucLInx3nF4fMf+iy/o28BTncYJxkt++Iff5/LJC+5vW5LiwGgORGrJfdOQ5Qlg6fqWftiRJAW5ikMwSA80k0Oz4PBQ4f2Ooig4HjoyfUYiR272Ry6fPOPu9pHVYslmMadpaoQYQsCKPdaJQK5wjvrYIqXi6smG6jjSmhrTG9Iopu1bYnVGpi/Y3Y/s9CNlobm726L1njjRCAFVvWNWnJEkQbkehzwgeXAMvUQqTx7NUMmOvlc4Kzh0B6JY41WFnUYuLz4gz0pu7m6ZLTeMg0SIhrPNFVLA0NU83KQc9h1FWVJXI0qUHPeWRZEzL2foKGeRpLRti44VeZnRjwP9aJkVZ0jlkEpwdr6kSAPwfl4uQUS0bU2el8zmKdvtNrT4iBY7Fbx7u2W+DvWsr97+FM2CukoDBzKrSdOIvnPEIqXMJZmb0w57ynxOnq3Y7u84PA5cXK2ZpoHFckGazLi9uyPPU4yb6Ow9w9Tw8PqaIst59vRFeHYRE8cRm02JlhF+2nH25Bl1sydfKMahJ44Tfunbn1K1RxwFw3jA25Q8W5HnM6apx9gO6RXLxQodeermSHdsSZKM69fv6Nqe1Sbgin76oy/oGkeSasZOInVH1wjSpEObFbv2nqRbYg4jbbP9Zx0T/9kHyofmSFZG1KOi6fY8VBOR9ORzy/3be9oToPjYdLRtzdubRx4ODc4PKG3I4wWjOZAVC+q6ZrIG7zPevDbs9hUjNU3TYY04AWlBS4VSEaMxtPWBxWKDjDQPuy262qNOjCr0Vw9XGIaBuq5ZzNdEAsZRUJYlxg4kscY7Rz/0JGlEloUH+TQZwJDnOdPoTj6vhqLMMaNhv225eLqmqhrwhiTJiGNPZ8wpYRjScODoTYfWgYI/KxP6IbQGDKMm0inzeUC2qCyhaepTPZ3CuoEkjZBC4E4PY+cncIKiyIhjkHicE0xDT5QljGbCe4dWMUp6enMkSyKUjGlbS3Uc8V4RxfC4vQ3AcpUFtayrkElozVEiZ5mHJPNkB4wX5EVQQiNVhqGcHiHCMKR0yjQM6CgMGsfDgSxecjzuUNqTpQumwSBmnvmqZBygrY8kaYwXkr7vyfOcvp/wwpOmMRKN857JTdRdsE+M1lAddhRFRuQc9SGo1mYwNE1Hmo6h+cBpuskx+h3eO7rjjjxLyGcl1a5lPlvT1iGQI3z4vTLjSRmNEyRQH6ugTHlJEgX12nuLsR3rTYlwLVFchErMvkXIFUWaonXENCiE75gVJVrFQGCi1n1NkqYU+TlFIbFGoFTEZvX8VIdlebi/ZzbLsd6xXq4CmWC1Qeu7MNhxEtyEeJ+6/srb6H1IsX7FgVRKvfcq/hyzc6pL9OHvAzfQn5S9U6vQaTgNyBj3HjkUBshw2bH256if0EfuQIekrZYa6+0/pSACpz5xY6YTxsohvD8NgachVMv3qqfwAqU9ztlw0HuFcxZjLUqewjnv8S8OIR1gT+vwkExHehw2JLm9CwhJKdFSYZ04vR1hWJ7MiPOeOElIkpQkzRiGES9+nhIex57FYh7U1NN7/lVTDkDTBFVmNlucvjfey7nX716xXM2Zz+d0Q/s+U+WcpTrs329zqjowDJNE0VQtd3d3/PJf+Is87LbBSqLC6v32+oamPnJ1dcXhcHiv8Kdpyrs3rxjHkeLyMlhY6posz4l1hB1G2rZmvV4TJTH10BJJTaIFXdMjIsNydoFQJVJlKOGxZiCNNU1Vszu849vf/lWsHekai1PhkrPb3vHTn32BjL/O828VvH37ll/51V8nVppJWIwd+MlnbziOEXmSAApDg/OGrjvgvGJC4egY+5hYKiar2O9u+ehr32S2vuT3//Af8Z1P/zyL9RXf+/3/K6P6lOn1DuEPvFDnvH24ZZwqymmDjiRf++QTimjG6I785m9+He8l33j5NeLEIFWCFmtijty87kjiFd1hxI2ezeaMh/uRLM/R8imzoiCPCkbXEwGbi4i3b7YssivYdCSF4ea2Ic/XaHPPcnGBVrenvICg2bWkZYGpW+RUkboMaVKWxRnTdI0bR6apw5qW9bygqRvc1HKxzGnGhKo+cHlWcGhb4iiiO7TM8oRp2OKsZp6e4TrBR08/pu2O2MlysV7hfUNcnmFdR98bYglPnp1R1z12DBflSCpefLDCu5SL8zMmU2PGjG5wNP2BWGnevH3L+foqKKOTCNs3OWMcgmUjijXOhc96140kcRRCfComyWKqfU+c5xBZolhyOO5IkoSsmOO9Zb7IkSoiFTHHqsYkjqmbMFPD5DVJElHVB9pOkWUZZVnikDzutuwPHfOlYvtYsVr
PGW3L5vyS47FCxR7pUpq2IkkTbvdbhAxBoceHA+dXF1S7hidXT9lsGpJoQT86bm9vWS4umS9W9NMIMmNyljdvfsbF+VPOUs2b69esFh8gI8njfsflVcnUOIw5sts3KBnh8KAgS2P++IefcXn5lDifMzaWRZlh/JYsjnjy8ZpYBxGimnpWizWT6VgvF8RxTFkI2u6OPJ9hpgkR9ZSzgqbvyIqUN292fOMbX2O3PWLtRJFrhm4kVhFxliKAcejI0ojH+5osySnzjGk0KGG5fXcd+s/bgbo+cnZ2xvW7R3bba1ZnBUUeUXU17Zs/JpI5V1dXf/ID5Rf7a870GV9+fkNTVUxDTJEVKD0RiYLbh5vTATtRzBI8lrf3jxRlSpYLvL/j/v6B2XxJpHL6psPbmMFs8eqId2EtqOKIqgpp4yRWXF+/Ik/yYGw/HpmMQ0iP0BoVacp5ye5x4Ozy8rRi8PTdSNN377l3QgQVT0eWNI3I8wT1fmhwTNMQBhXjUFrghSMr0vAg07A+W2DdSJJKnBUoaUGAjvypR1QifGhlKPPsfaBBqlBndDgc0DLBjBM6tkSRJI4zynlEVR1wToNQIdwjBJGMqA4H5vOStEjwXiAwOGtYLFdhdS8UXT9RlBnTFBqClJCnyH/oQhXCM1sWTKZl4VcnM7XFTI6hhyQ1rNZ5SJqXEToeaGrDTOeMfbgYzPIFw2Com4okLtBaME6PFFlG001MgwxNB8IEv2ScYU2LkJKbm3d88OIpzXGLmTqcC4pvJFXwZBYz9vs9aaSpmpY0yxjtRJoX4WfW9WwuNig80+RZzM/ohj0CxWZ5SdtVTH1QtqYx+D+01ljjqaaBLFJoldN3FqXDe/OVXSKOY8Z+YLVaMY4jOYsTi1GQF3MmM5CmEUKG35GQfO5QqsQajRBjANOaEAByo8E7GPqGslgQxYLFJsI6QSY3eL9nNJZ5NqfrR5SImM8XAQ/ESJ4HlXxoanSch98vrbHO4LwDoU6r66/wNyEgI5DvBy0pQwrbe3f6y79XOL8aEL9ae3NqFlFahwEUThcpj1QKJeVJqeS94hdQQuHP1lGEdx5j7HuAeRh8g1Jp3VeMwbA6lyqwXdSpOtFNAVskpSZ0IMdYOxHFCc6CmVzYmkuHowfx1ec1/DzC2txhrDkppeKEm3Hv/ZdCSfAeLwTGDSeUhmYaR0BxcX7F8Vix2215/sHL02sQSKHp+wGlwwPNnZiHP2dlfvV+jgj589DRV1/ew831l6xX87BOtD1CKLSWNNWew+GGT7/1iyghENjQ/qLC+jxNUxaLBbtdUKbGcWSxWHB7/Zbnz5+zXp9hTBjwmyaAxu/v77m6uuTi4ox2aEPIsOvQSjC2FZHSrM82tP2I06ATsMPEooj5g+/+I8ok45/7rT/PdndLkqQUSVjJvrt+jfcT8/mS5rilHxRJJHF5wsNuT5xIztYb2qFHJzF2lCQK7toWoaAoFyiRYo2nrQdG5+nrCuVDfW3V1wh9hrApzXEgUj2HwzWfvvwaVsdI4Xj95qecvfyIP3v+r/FHv/s9snmJmxa8fTSU85TF+TljK6gmwfE2ouk/YzG/YBgHvvziNUpptg/XeCxuEvTO0Q/1e4+olJL1es0wOva7hkjOeP7BOWkSkcUREsVyOadtJUINlEXK7Rc7rDfUzQ37x4m4CT4LJRMmM5GmKXEUgpGLRcbUCoyRCGoib7FWMpud0bR7+mNH107B2nOsMQ5eXp4HSobQ9PXAfDmja49I36Llis1iidaaNNEoQkFI21TMZjH9ZLk6/zpxOtEPFUm05PJFyqG6YewLzpaerHBcnH/A8bjn9bs93g0oObFaxvT9yOVmicBSZFFoH/Iebwa6vgWfMg4eZxu6rsNaQ3VsOT8/Z+gtu/vwXO/7jigWHPYW/Iw8LRmGFnGy/AxDh1IjZTlju7sPGxrbUyjAK6yHrm3Z7x9YLBYnESdFyYamT1gsVngh2T2OlHnMsdohKWibFpQh6cCYIAh0zenztz+CV3TDxOP2gdevvs+TqxVd63j15kfwfYezA2W+Ics1bVdzf7xjtV3x8HjDvNzz4uKMY93Tiy48D2SA0EdxyWF/x/3hmqaRSOWopy1nmwsmo0m8oukbDlWNUgX7+gEvWupm4PLsGeM4cLu95enTryOYsTyb84M//oKPXn5C12959faRJMnIS6hrQd+mTEOLp2Hcdyxnl1SnLfHZ2QVV3RNFirPNFXlZsD/ck2YJq03Kze0BNcHgBvpuJOs8eVnQjUciWdB19zTNgbP1OVpkvHu1+5MfKNvdI2+2HmMnhkYzTB3G37HbG4psjZQVxoFKMpq+x51WnFVjeNg24QFuFnTbGkxPEiXkeYeZRhQlUobVtLNDMAojsKNlvVizmC2o+wFrQo2WPj04hmGkrnqc1xzrw8/X10qCCX2e8/mc3S4kzJIkwtqJwVhiZoyjQSobapaQWDuAssRxHLqqZ/P3vstpMsEvl2Y4P5xqjkaSuMTYEaUCY6woY5ra0PdDeICT48zI+dMEM4VQgdaSujqiNKw2KXiNtTPsNLLb7ZDkKKHJc421Ax5FmiTEcRqGTm/J8gQdLSjKlKbuKeaetq1ZLjfcXW9JkgLrFa9e31EUKbEOh4WxLWmaUMwkUg/kRQCoTr2lqwRZmtN1Ry7OzqmbHXEyMI2Ki7ML+jZY4+Ik5djU5FahZM449kRxzpOLjzgedkitcdYzy1f0jUOieHp5yWAmjocGQViDjoNlNT+jaxp0Al1/REYaITTVsSXLCuzkadoaqTxFumBezpAiwkySLEqx1jONE8o6tIjojz2LxSKggdqGNPWAQeskGMKtYbFc4q2hnBdMZsDYibPVJfv9HstE2+8CUqPvqeuWWblkUc7pTc/N7SNaeRaznLo+AsHQPfYVPoYo1hi/Bx9hJgGMDOaGaWwRxBw7Q30MnfOpnpNlBdPUcne3Zz6fkyQJ+90OZ10YZIC/8kvPub6acziE4WCcOuw0UBRZWLsnGWaS75VD7wXH+p44CQNlUxvMqIgTWK1WGGfRkQ8VajohThTtsXpfCpAkyXvsU32sQnvD1DJfFDRNhXeGKEoYh6AGJklClhXsDw90Q8fzF894eLgLUOMooigyqnqPkkHdLfLFyafnGKYjRT5judygZMLZ2Zofffbd00pqGXxroseYKijfI0yjp+97pmkgyfLQ+S32eBP8fUqMpGmO89l7o/rZesPY1zzc7fm1X/nL/Jd+528w9Jbf/rN/hv/i7/wO/93//v+AL9+9wVvLs/OX/M3/6N/lb/+t/wv/5n/rf4pMCoahRSUCaebvz8T/1f/63+T51Yb//H/uv0dnG4yaiJ1gu+35f/zf/7d88MHXEELQDi3SS84WG3760+8xjEcuL6+wxoMwuEkh4pib23cUaUaWZXTjQJqmdENP13W8ffuWMi/w3nM8HsPrcwY3Gfb7PW1b8+mnnyKkxdgxnJ2zBe/u7vji88/Jyxnnl884DgY39FxuFlSHmt3ukW/86d9GRx5vO4RI6ceOaTCMY8uzZ88QQnI4NMRRHoIjCu
62R/Ikpkhi+mGCSDBVE/M0pmkGkAHIPyIp8wKNB7Ngqh9Yny+puwPHo0NnA6nridKEtt9hKsfDwz02KbhY5uSF4HZ7w5PVEzabgn/8e9/jO996yWR76mrJoRJEacS290x9i/dnfHlnMVai5Cc4c4SygClDFwNrZairHdOgiaMdCMOPf3RPkk0BAC0bbnY1zaHnbPWcJIr58voz7JQSpWFrEUfBPhMninZQWJthf45yRUYx508ugl3ESnp9YJFKuhqKOMdHMlz2y4Sy0CyXEePgyWcFXTUytQ2tqYmkZHF2jncaKwUaydAMLJYZQzPR1ZYs0dTNQBzntPuGQ2Mp0i115cnSnN3xhsc7y2Z9ydOrJbvdIzfXW+r9l/TDESVnDGbEi5q6FkgiojTG9ILz8zNevwswducdZpywNgR7pBqIdcq8XPCwvaOuRubzkqbds1ov8LQY1zGfl6fnnqHrKy6eXPG439EPDZYOLdakZUxTd0gV83jcksYrpIvI0xXRwgUb3eRp25EsFdTNO66vq9CPPfXsdz8GMdH3YQvVDTXmVCcbxQKsIooz3l3fU+9Gzi/vT++35vXNH5Hql0TpyGRa0iLi2LyhGiKydE3f39PfVSTZguv7LTfXr3j2wXP6mx6lDWM/cHmW8mr/I3bHEMwZphFExOt3I4+P4axdNBHGtDg5sB86Pnv9iFc9L1++5A9/+DOyLMN7z676Ic+uvkUzDlzf3rLbV1w9PePzz+5x1JytnhCrnMddRVt35LmnyEpubx6Jkw4pg62vKFb0/cj9fUPRGbqpY7lcctg3oZZRL7m7+z5FseBYD9jRUuShO1wlE1muGY2ld9c8bq//5AfKwz45+Q5b4iTBoejqhKlXdN5gfOipLIqIrp1QPg7r6rGnTOfEcsbd7hWzRYxVFiktWi5QogI/0feWLA9rllj78KGdAspgGB1mkiw3Je1giJICZxXGjDw+7sjiBS5T9MOI9J44ETg/kMQF4wBS5EhpSaIlx+Ge+jixXIUVvZIp0IK0ZEVJmqYMQ0c60whPSBQCSZzjbQDsrlZLtFRU1cQ0jigZMYw93k20zYSbMrJI4UWGVkdWmxRnHlkun5GmJfvjLcLGzJYWJybSfKA5Kqo9rM4tUdxwLl7iqHi478gKy3Y3UM4ynB3wWO4fQz1hVYUkcjc2lDPFbGnJ8kt+9KMfkhY5SmZ0/QETw9inoSrSDXgLZbrh8DAicUiVkkYpCs2qjEgST3UEN6Zs1gm73YHFpmS3PdCPMc6lzOYF/WiZhp5EGm7e7hHCkebTKZGeUNWPLMsls9WG8fGRJEtpmo6hrlnMFlT1jmGYmBUx3icIA1KMpEwkzoLwjL4njZdEqsEOCcMAMjoipGE+23B0HXG5YrJbitkCZzKqug74pboni2YIpYi0Js8VTd2SJTldV+Nj0NKz3b99X90pXODlqShiuZzTNBXOTHhvUTookl6EgT1RYUWzXn2EZaBrQ9f5sQlrTe8ijG2xTpIXHud6ojwOAR4zhhYc80A3aLxxzPIZnauw3p3UPk87VeyPLRDhhaScz3AuAJ+nyeEGRzs8UJZz6sqwXCcs1zOGrieKJIuyQJzSnFImDG1FN47Ml5fU9ZHJ7tGZZrAjw9Bxfn5O1/Ycq3vmy5QkkUzHJePUIVRLXqxpK41UljixSKVYn2XMZmuyNOLiKuVuJri5r1HpGictRfacfrrHC7h/fMPXv/kEZzXeXLLb3/P6dQgNzG/W7HctxTKn7/f0bUReaKYhRjhP1x/RWpNlJUoposgjRYrWzxgng5cVXQuWjMkMRIkmigT3N1uEmPPX/2v/Mz795p/i9Rf3/OSHPwDp+LU//WfY1Uek9DirmVzLj3/4XbKZICmX1PUBJyAh4fb+9c/PxIcd3/nGL5Kkmr5RSD/gjeLx7ksme0McnTMOjvSr0BKWx9tblDiGoN+xwWcxVluUG3h8vOdskyFUjDQxvXMUi4TmWEPbMLtaEGclxlr8NDIpqPqBsWtZzAvybMk4WLQXIB1eaKpqT1akyDgKnb5DR5RKBtvxxZfviJgznyUc90MIF3iDEIp9tacfQ+iwaw9IBMb2iCRhqHZU2xs2m3NUFAcfaJziRc8wJTTHA7N5ifUKLSPiJKMbejIN7+qKD88XgfOLxLoJO3aUsaB7mEj0iNMzpIdxOqD7c5SydH3Ftt6TRg6pFfvHI2VuwDmG5hSw0kGNV0IghWcaQ7AtTwom6dC6CIn5eYmz4Pw54zjyyTcFUgeFfrI1bd2QlpZ6crR2wroIMLjKkWrBw/6W1WrFw/U94zSwHPR7H7WQEikVTd0HT3QUk6crlBbES5jG0N89jj0QzsKLq6sTGWKi7m5Zrhdk6RVaC4yzjKNByJZxMFxulvTDhBkcOtEMvePJk+fc3t5SpOdcnPWYfmToHbHIGPaKvMiRY8zuXctgY+bFmn7saBvPoXqF95b16hI5SZJU0u0d603Jfn/H1NWUxVOEvkBxTW8s1mjsCM4MSBIynREry/bhFUkWI8XI3d2e5WJDls+ppgZrI4oipz9KUrFExRH9FBNpTaZL4kVMVT8wK54QRQrhBId9TZ5FxIkEP2GmHpMkpHlGW1dIlTK1Ew5z2tpFiGlimjzjOCKEApcitceOFU44RtWwawRpmhLpAqU+YDCCeX6O9TXzMsG6A8PQMfgK6yIsgOvJZp5qF3O7vSOKBdYIsjgDdc/D/T19N3F9f481/vS5GXl8CMzb1WrBYAZE7PHuJ5ixYxxHrl/9EdM0ICNB241sFmvevL1lVz2SJjlKxfzk9c/AOvI8593dA7/xp/8MdbfncXfHrI8ZsjOiRDLYjL4beXi84cmzD+ndxLvqhyzljDxasN3dhJBYDNt9xzgNpNZy2PVkucb6GB+3OF+io4l+uEdRsJg9/ZMfKIfBEMeCthbEqaXrBrrRUs5DOw1eh5uo1CfVIqYoM6TMsW5kbBzPnz/HeYs1jjhOMd6xWCyQ2tG3I0k8Z7OYY4zhuOuwoiPPliffRU0/KKQrEZNmXpTsthWzQlAuDEkS/I5JDFVVkWae5XpCy5RZfsbDwwObdc6sfMbFkz3jAFfPctq25e52Ik0jpLTU9TVn5yucm3jz+hVZOkNKzTh6zs7O6LqOaTS0Y0tZlhyPR0bTBeO+sSRxTD801NORNCnxamCaJE8uPiIvPUne8eTDJVKeUdUPvHtzwLmMswvJ85eSLL/i5p3hR3/0jucfKdYXFmsT+rFCRxlepzCMLJcp+/0W5wc8MJsVeCZurt+eaqSeYHzL08sZu33C5jzl9rplt90iledsecl+fySJA84ojsJhIrxnsp76GJSosbccHwVDm9JVlraFNI/QzgaVsWspM01b1VycP2N/uCVPc4bWM7WGiAw7Ce7e3rLb7ZjNZszSnL7tGJqKsRuCz8+tEb4m1opIK9JZhtaaPC+Y5Qt0FNhkx27ETiPLZcAfTV3PalkgXUZSvGB/uMfQs5gtUULicsNhX8EYQhzHQ8fZ2QX7XY0ZNVFehoEahbE1eMk49iRxRCQj4lgHoLv3RFHM4
bAnTTPSLAaXn1bKlrrbo5RnNi8xDmw1MU4hER3FJUWUoTTsdwfKxTkPj68pZgVCKYTPSAsYJ8foJoQSJ6g5COnJsiVedDRNT5oqhJZ4NzBNPWW5oK626CTw6tJ5TTt0RGJN3bXQNlxcPMEBWofX5nQ4QNr+AetH0gi0iikzxfrD51y/uWPqY1aLK4bpyKFr0PGEMyWx+gA/jazWnq4zFGWwDEz9jm9/5wllNsM58H1Ekmp+8uMvmfxEnGrwEZuzkk8+ueDy8ik3796RLzX39wG6e6wNWRJzrAYOh5757JxprDnsDeUsARFA74dDDaIhL1LiU7nAOIWAz37bsVyuOVb3JGnEvFzz7uYznj/5Ff5H/8P/JY/bnn/0u9/l069/wvf++HdJc/jo4w9OTT6SJI1o257Xb3/CL/3in0brYF/ph5FZXnBz8+79mdhNBzabK4QQ9EONThWRTvjy1U+o24bFfEM3HrFGgAgYr/v9HZdXL9FJifMNiojBThivuLt5zTf++d/iUNWB16ot0xBzd/capXo2Z0/ASkbXEuHI0g0P777g7uYnfDT/NlGSsn24RnuBl4EXuHu8JUsj1qsLHnePZMUaIRui6IybN6+ZzwCR0k011nqqqmE2K9je35ElKbPZLAQLhcBYi/ae3a6iKDI2Z2d0w4QxI94FD2/Xh5SptRMqksSRCudLHJh8kRJIkdI0E1o6vAl9xm3d0+xrXl5EgGAcLFPiMbFFuKA2TYNjuTgjz+ZcXErevXvDaj6jaxsApJXvFXbnAoc2+IUtWgVqhVTBQy1iCYTLoNYaL4K/OJpmZEt/sksRXodSeCxdF55zWlb0XYOKV4z9A+Mk8Ij3PuPbuz1vm7AxWC0KvLckaaByJNGa2WKBczFFmbJcwGQGNuslD7d3PHv6IVESfOGz2YymaXBm4PnlJhQZCIt3LYunq/CcS1Oaagu2xzkBg8M6SZGnjHVotzruerw9MPQTs2VG2x3ppx4tIopshveGrm3J0xV93aGJaHYD/eCYJ0/oakeUgh9lOAudZJxcULH6Aa1CY1dRZPSDAB2xWZ7hrMROHolkXs7pupYoThDW0rYtZRHOjrYdEcIT6RxjNFpFeGEQkcMycKwHxtExK5dYa6mrliRJqesjUkuGPoT5nLKhrzpNWK7XTKPleKzJo5TeGJRSFPmMpu7xTjBNI9ZC144MzZbJ9Bwf02D5iRcMrcGLCOcM1a4lyxKkzemakXFwmMkyxiOPu3fBrhNlHI6GPImZphC69U4AmrYZUUozmBprB4QI26O26YmzBGMHtFL4cc6h2pLnG5SIsNME1pIXMU11oJsMf/DH/4RMLfj2p5/QTDXN8I75bI2jYXJ3xGXOT778j8jynK7rSaIMo25BTIjI0x0ky1mJjqLwcywVSnr2uy2LdcZhd0deZJg+IS88Y2/+5AfKcOAm5HkYXJIkoZ8MzgU/VSxS3GQwzjMvwiH0+PhIliUI6bDWkJdzjscDWidMhhOQ2xJFEVmsOEVGmRUl3kjmizOEkEHCLedst3uyfMH2sUY6x/Onc4Yh4IratiePY6RuePYsI9HnLNYCL3ccd3uevYyYL3rMmFCWL7m9e40QA0Vh+Y1f/0Ue7o8M0wO//Cvf4HCoTmpOxvnmKW0t2e7CSt57Qd9O5HnB4+MjeZEADqVH4ihhtS5QYs95tqbte5IsBu841BVRnuJ6zTieVNBsyfFw5PJZxvosIo+fc319zXzp+fhbmuVmxt3tHmcUUZTSNTWzeajxGtqWb//iNzjsjiFUkLccj5bV4pvBtN/vWS2eMnYpTy46wPDy2QXRif0nheV8veHh4YEin4FwODNh2rAyLWYZm8UMN0V0rWc931A3Fb3ziGkg1gozDcwzRZrGnM8vAPBlxiyNiL3EuxjnIry1SKF4efVBOASEJJ8v6PuezWIZPDo2w6WOyU0UXw0lPigQGkWEYDQjL56v2W0PRCqiyBYYM+FoSHXJ0DTMC0iSlLZRoQJU9ag4xVt54vktMGYMbM0op5yl4CaKPGY2Bs7pk6sN4zgy9h3OaCIdE8UCYyYuLi5PLTyH4PNzLsDtwz2WabLUXc3xWJMVKeAZGkOZblDaMIwjZndksTyjsw1jO5HmOaY7NTXFGqVmKF0BLoReYsVqs2RzFjMMU4C9IyijnKbpSMo40AsmiXWaOMrZHu+YlZos3XDsDngX0Q1BlUc6hLX008DLlx8jmdDKM/SeN6/u0TolLTTGNUgtMUYyDh1SNsRlivOGqq3pGpimmqwwJNmcYZxzf1vx8YcvmK9K8mXC5OFi/R3+6Affpa0tZozZPlZ09VuEHIkSxy/+0qe0TQ9i5HDYBkyOAi+acInraya753zzIQ93FUk2kGYa61KsSzjUD2TJiqG3GG/Y7u5Yr9f03Z7b67f89m/+y/z2b/8VPv/sjqavmc1zJuP4/e9+j9/8rd/g7MmCu12DEgKtJa9fv+b+/ku+8fF/lbZtqJsjzgY27o9/+t33Z2JSSL729V8On+1YMrQjHs/nX/6QIl9QzpYcm3dEeo6QE8fdnnf3b/j4o/8UQqbU3SOJhTLPOdzu8K5muTgnn+dMdYudBrJiybs3rzhUd2TFnKGvg5IehTq09rBjXgZYfpTEKKWQQhOlOuDTppoiXbNYrBncEPylX4W0zMD5eYqKMvrpQBaHtdvY9/TDkc3mHK116AUWnP6/EdvHIzKCONEcjzXWBl5unoZLLgT/rjiRApy3RErjjUWrcO6P00heJEzTFAaP0TJ1Fefn3+DYTdStIY4sMhoYccQqZexC2tV4uHr+Ie04cXP9jm99/SO6rkWdKhu/GoDtGIohJheeMVpoklQxTSHMmMQZQiQnEoBFaonDhgtE32CtDUQCGXbZURQxTgP5fIG1cyKlWSxecrY7IuUB54NP2NiYvFyhtGR33CGEY9o34fOsHvjRFw3L+Yrz9Ya+HYh1QppmjN1IWs7QqsBLy8PDAxBS97v9A1kWoXxEmma0dcusmIWfzejZzDcMQ0+azLi/vydWKc61rJZr5NwxjiOzzYyx7TH1QCQjZvMVvWjph4ZuGJACVtmMtmsQVnC5vKTrJ1RsUFphiZn6js1qA7MV/dgwTj1pmjGZkaYd0HLGopiz3W5J44Q4ioilotrtmM2KEChNE/I0wztHluRMJlBRhskwTUeaVlEWC8piQdNWXJ5/yPX1NV3TMg5Q5ku0imn6A00VLDRSh/pOpRTWG/rHPXlenjzXgmm0tKZlWVwyyyR1faQoU6ahwxuLVw6lPUkclETTGawLNrIsT5FeYqQNYdRUIEaPihKMVWhdMk4NpjesludIH8LBSbQAL6nrlnye0/UNwyRxXqE1KC3RUYISEZtNxv3dA5Ee6UeHMxYzWpIkRcdrXGcRRqB1RVPtmfTId7+75+nF11hfSP74B9/H25RvfH1FXRs+eP4h1zev6GqDn2rq9oHzixVV26Hlkigew/N5MDy5nHH/+Iaha7m/E2QFWDeSZwsme6Q3/0zEoP9kA2WRC7r2SDkbqKuOyTq+/ek3efvmgWGaMKbG
OYcUmjhaEubyiKm3DGZglhfUVQdOYKcJayes64LvURZMXjINTegm3d5xfhaSWzqeyPMNx51kPl/inOHpxYZsphntA9VxoqsVRZnw+PhIJBZ0zUi8GLl+13B2PsNOjrtbS930TP2B5cKT5iWzcs1qk3I4HPjmN79JP82YTMfF2cvTDTfi/PIZX3y2wzjJzc0d1sgQlrA+BEwiGIaGptrz6Td+GWMGNpfBt7na5GzOZtzc7MnyCOM7bt9aquMjZ5cRn3/ekiZXNM2Bv/93WpIoYD7iYofWmv3WUZYZx+ORSMxIVEy1mxh6SZat+OmPbyiynHrfkeY5bet4uHlHkkpwCduhBXEkjiOm1hInmnk2CyvyrmccerI4IdYa73q00kxKINDU2xphEmBCypihaVDe8PxJUGT2x4bV4hJPKKEXSLKsIFcSY1suNyu6dkRrxTi1OKsYx4HNfB0OQSzz9YamG8iLnK6vUEpR1wND25EkGc5bqv2OLMsQLkV4SV21CGnJ8pK7mx1RLEliy+Kq5MnFC6r6gSiKuLk70LQ9KhLkxQo/OrpxwFlPkWfkmafvO5xvwE+kacliMSPLwkNuuVrhfEjInl8saA4DRbE8NZdE7DJBXdecnW+o6wo3Rtzf7VksVzx9/ox3Nw9EiWS9mYFMmPqJNPfc3D0gZeiYPrzZkpdzhDSks5Svf+uSw+FI/9kj1vIeE9QNex52jjybMY0e5wzlLMc6iZARSibo2IKo0CpBuII4g6bdEeeXPB63aCVJ84y6bXBmhE6htOZ7f/R9ynxGlkQMQ0fbVcxnq1NITtL3DXm2wNsVo39g333JL//Sr2GmiD/+ox8hYjh0O/ZfTFw/TvziL3zMT969IlZn7A8TBs2bx5/Q9DuM6JDJmrMnK5arDOEzBAlSwu3tljRdM5pbjk1Fma9ohiOHwxEhY5aLkpvbz2lbSZFtsLJjHA5YX6D1jGGsiTOFlwotYupDxycvfo3v/Lnf5m//rb/HL/3ykXn5LBzQWnFzc4OTD3z6rb9M3wWV3nrPoljzs8++j9aSzeop4zCglEB4gTeCz774/fdn4mp9QV4sOR5rohjyNOfu+o5jfcNvfvufPwW6vhpGEn785o+Zxj0fPPsG/dQjlQ7lD1HK2+vXRBksFk94bO5RNiHROeM48vhwzZOLK+aLFePQ4b1imARp4njz+R+BGXn67GMOzZ5x7FEyYTErefPmDU2z4+nTF8F/LgzOOgo9o65r6uM1TxdPSDJFPYUwlRABtXZ394aPXn6bKIqomvpUw+mZhuGEZiuCJ3k0eBHWjMJZtts9wofX61xYaes4RTgZlGql8ULhEMRxBM5iEZjJksQaJYKqZFxG3Q4IXTOLc/bbB+qTz7fISx73e+brM7xQ/PAnn/Ps8gIrHeLUkCWlRCYBn5aePkfOuVOQKazGp/6IccFaArwfIKXUodBAayCoq0opRjOQph6lQuDSe0OSpnhXI4UmjRM8nvPzD4munuCd4OlVoGV0XaB/WDNi7RgQZEPNdv+WLO2p2p441tSHjqRrcX5gli/Bh2DgennFdnfHbBkTCUGWheDiLA8Ys74dyLN5aItTHu8CSxbnUdqzXCxwzpLMFUp7ut4wdCNCRsxmC87OEobekCUlaRQRxSIE5jy4RAOCbP0BTyK4u7tjtVqRni41QguOdUUWn/jOo2Fqe7x2xFKRaE0vDLFyTLZl97ClLMvwuZICYwY8nqFp8H4kT1d0x55yXrCanTN2sJ6fcazvWc5XVFWH0Q6cZD5bolREP4V8w1ec3biIqasjUgYV2tiArmvHiubYBCyfM3gv0CpCKYUximbYEcUaKS2x1kiVEavAHpbCIwuCBzFOGadAl5gmj0ATxwmaQKlQMsVOoQJ4ubhkGBqUzEhkivWCaWqJo5xYQqxitvcHsnjBbn/Dk4uX1HXNaPeo0SIpAipQTczVU4bpgPQSbyxf/uzH3LzJUDKlqq9pmwolc+qjomldGIptT1me09Tw7u6eZaEZx5iz9TnWtOhoTqwyVBxx9gLmsw13929xk6PtYrTK/uQHyq99/JSmHuiHiuWiIE1zjKkRriONYb6a0/cjdd0yDUciJWjaI0IIilmBd2HwMMajvGa33XJx9YTJdIzDCdkigj8m1QmPuy1FUTDZibrZE8cG4yKyqAAGHu4eGMee7a4n0pqm25PqDePQonRMXU1onbJ/UOz2BwzQVnPmi5KbuwfSZMGxvOdhm+BFz3b7OVJPdK0hiyeEiNgdRu4fP+fh/kgRZxz3D6TJDOMkzkpUJBmGgEt48vLbSDKcdaE/Wy8wo+Pt2wNFtqE5HLh5JfHeMlnD7qZgHAR9+4C90xyqgaI4spxvOD6mlHnJOAgwE6k6p68GhqkKQR+tkD7cVg8Px3DYdebE/JNEKqapDMJZ4kQyxQppJYkSjMPIhA03fTxPLs5O7R0dUz+wWs1wPvRNV8fhtNLtGIaBi8sVWZbQdR1Pzs6ojxPOT4zDRLmImTqPIEK4BGfCmkkrxdCHB5WUIWkN4KVCqJgkEUgRsZwX7I9hTThNEzpWKFGilcC6lskNWKsoyhmehDRPeP7yCUkUh4dWmtAMPV+82nG+eUIca5q+oesi4ikPzRJJStMNp4E6dMP3Q433jre3D8znc7TWOAmT8SRpzmKZImRElhcIJWjqHabqyIuEYp5QlBEffnIJbo81F6zWC6ZpYHmu6fqW5brEWUXTKcpZSjbPUDJmuz0Sa00Zx8Sp4lAP3N1UpKkny8wJ8g1KK/wEAkvXDiwWK7bbLdM0MQyOumo5Owt+wXI1IHyMGyy7akc5Vzh1YLZe4CZLWWYkuaQb+5NfVLJYzYlUhnE9Ko3J44KszPEuputaJq9oBxDRkSRLqaqKv/sPvkekSxwt2/sH8rxAyEfGeuTf/3/VPH12zv39P8KRkMYbPCPKR0SJ4O6hIc0WDOaAVgld0+P8gMBj2GEdZFnCrnoAl+FxeCv58st7ohgiNacdRkZzIEnB+Q5rBTiNNZClc+rjkeeX3+a/8df/bf4r/8q/ymh2fPTRf4fXr27QqSeNMn78xQ/Jiomvf/0XmZwliWL69oC38IMf/EM+uPqYxWxGNwzYyQCS23e3GPNzJts3Pv5LGO8QOtSjumnk8fGacTzy4vmnIWQmEsapJU3XvH33M5Sy5NkqeEFVCqJCoPjp579PVhYgYpqm4XyxBGOo6iMPd19w8Y2vE+mcoTviLKhE0bYdQ7cnjSPibE7XBlqFNwrv4bC7Yz7POb98GgJA0Sk17yTV4cjtw4/49Nu/wuRDe1EaxXTjxPbhkXE4ok/IqK/YngHcP9B1W9abC8bB0I8DXobfO+ECu7Oc5cRRzmQDWUArh1YR/TgwmxdEiSYpc6RVSOew3tF5y3o5p5zltD+7Q0QRzlvGqQVy9rsty0VBMctJsgSrBU3bM18uGdqON29vWa9mp0YzGywjQoQ6WWAcT4OhTjDjeKKIpGQnIL+XnGD9Brx8j9fKspC0n6aeKI6YhvGE5wpKZhQrsjwOn9NTEUCcOrwc8MJTNSPjaMLvV29
DR3qaIoQizWd8+p2PTuQFf9qKVNR1Tz8c6FtDP7R0/T4E1s4X7N7ek6cBd3dzc8NyPiPPc477PS9fvkREkOQL5mVoqItigR0VeVJwPFSI2KEzQRY5hE8Dvm1oGKcWoSKkSNA6oWsb6ipcJPJZwjT6sIr2A26wdMeW+XLFaAzD0J+KSvYslzlaaz549iHV/oCdHHYa8ZNn/xCwbqYZOHYG5wxpnhFFGXmWsrx4SnWwRDpjyifaekeapiwXZ1SHHZtyznHfh0HQWFSUhNBmHbBEsc5o29AwttyU1PsD4zAw9DVREqGFYBxbylkcKoy9JY0jvAi/H0mSQDQxjmHA7YdQcKIQpGnB2A+MvkNHBWZywVLlw++ZkjF+mvBCc+wO5HmKECOHfcM4lMRRGpjBjAjvyRNN2+2ZFWcI4SmzmL5ruTx/Tl1NeK+Z58vAXm5rrOnQyYAfztB6AtFTZs9o3TGA12lYrVYMtqGuDY/3Lav1jPky5fFhS11FdEODcyuO9sh+L7i/rrg8e8b3Hh746OVL8kXMsxcF3/+jz/naJ7+MMYZ37+4oiv8/DJRNHartCr8mmyWn1pOE588tWVawe6hYnG148ew59/e3RIlmvU4xLnzwDocDSkmkgKdPzvjgxYrd/ohWmq6bcG4KJn09gFMUWUYSS4xJCLS7mOYomPREVb/j7GyDoGA51ySp5HiIGVvB5HqixCDcE6beMck90zCSLgxZusSMPcJrqsOOrhGUsyDrj+MtZ2cX9L2hlo/c3T0gRUScKOIkoW8Hvv7JN/jsp1/w/IOXHA4V9/e3FGXG6mzFw92OcbwNB5m2LOeXdG2P1hkPxz1Tb8lSRRwXOLukbRu0jylm4XC5uDyjqWqmsUdayeP9PbPskmPbkmpBksKszFlvZsznc/b7A1k6Y78/cn9/z2qRU1WOfnSkxYziTLB73GFHifUtZbnB2Z40Tri92ZJmCUIqps4RyZR5OcNazzh0FGWEnWI+ePaMqnlgGI9cXHyA1ANpkoRe1qTg+Ytz8tIzjZ5jfYszmlivKIqMrj8CEmMhzROObcU8WwTUizGMo8Eah5CSKIkxk+eDZ1fUbQVywDvBbltTlkuQGU1rSWJBNx1x3jIMwYurI4/HUL27xYsjsS54/bbHuIbZPEZLj3c1/diwWCxohoaq6cMw0xvq+sh8MSOWE8fqEWcFcZzSK0Vz17BeL3n4/BXYgBvKsgQhFLePB8oyZxg6rHvNxXnGarHh3btbtNY0dY/1li++fBuGaaVI05xphOPxhiyTzMsldpBM3tNUNSbSTH3MWZfhnQnKiRNEUUoaxzRNxXDys3yFyFJacHt7TVEUHL405InBc8fYWUxSEpURkfGMxgX25RSqwxApZZFzrPZYbxFSh1S9TRlGg3Mjk+tw0tONE1pMTF2GTosQ8vItUaQokzVaLHF2j7eOrPDc398yjoY8K+jbXYBiRw47hJv89e8+nhqmQvNInufkiSPOK7ALkmjNci15uGsRYo1XO5JcI8gRMgbZYt2AmUqUjEmSEWMPIDW31x1/+S/+Dv+ZP/+v8E/+8Q/4w+/9Hv/6v/5vYKfgYUMaZL7k1evPOBwfuby8oh8PTGNoKDlWW169+QF/6pd/i77v2O0PxImiLAs++8mP6Nrj+zNxs/wI8IzjANYxywrevf0ZaZoyK85o2yORngE9Zpyoqmu+/snXSPMF1p3QHTj6fqRr3vDso1+lnToin9MOPYkS1Psds1xw9uSKcYBpNAhlEQLubh9YzGLKZQg5mNbhtGQcBrIpYzQHBjfSjYJcB16pF4pYKz778U/YXJbMFxdMo0BJR1UFGsd+v+XyYs5yuWQ0A0JpnLXEccrj/QP74zuemHOs8oy2RziBtyN2nEBMaB3TdR1RUuKlo+k7FnnMMHZo5bDeEKcJrvfIOMZiAjuziDlUDcY6slwxjj1SC9zkqaoDZeTf+yLHacJLQd+MLBZLvvtPfo8k/oTNZhEsKM6ghQxhSe9J0xOk34vQlnQqiVBKobQEB904nlBbX1UEThjjT6gxTVuHljYGg0AhpaZvJqpjFephBQRyf7jga62JogQtQ2OY1po0kxjTorTEGEfTmZCSP30lccRqUwIbrOX0/QjMBM6P5C7gqoyZmM812/2W7XaLdROHP/4p80XGbrfDOcNisQhEBqlo6iPPnz9n99gzm6eBzTw1jG24HGsrKdIEqT1eSHI1J8uiUxWywbmOptlydnbGixcveP36NVGUgRB0bY9QkrPlE4wxNG3Her0mXZe0bRuCmFHO3f0N49CjSMiSDCkhz3KG0SHHCKyn0HFQl40gTVcgJYXOOYyPRJFGGs9qXgQaxrEJYHMdVNrjMTTTtG1LtauYlwu6rgl4NC9x1pHFJXkRMw49WiuGoT9VvYZSBIdlXiahBKN3LMqSSCdUxyCOje3ELEvAwnyeM009wgcE29BZRtsSaY+xHUUx4+JyxeFwREWBv+ysDhcbaVjNCuxkER7iWCGSGDNNFIWmOg5MvaYol6xmYWsw9QmNeYNpE/IiZn/cBaXcaFQ0Mo5NQOIlgV3d9jX1TUMcFQyDR+uIeVkio4bjzpPEMWY6YIzlxz/+MWU247u/V1MdG4SHNIuZFTOk/GdrU/xPNFBuH5pwe2wG8sGhlEApjxeafd/QVj2JKqi7GuFEIL4LzWI+4/b2nsuzl2y391xeLinKmPu7m5B86h2xjijLkof7ijjSzMo5oBmHGiU1RVGw31VMw0TnWs4vz/FWEOuEzlqwkovNcw6HA1n6MV2/px8fWC7OcD6jjJfoxJHnc+pjgxkLsqwCUsTocYPEDIrHmx06CsnySDiyNEOgmKU5Ty7WFEXB04sPyMqCH/zgBzy5uAQ4+So71mcLmmZguZwzDoI8zxhPq2YfxSgZuJFpUnJzO2BGxZP1nONhot47lmVGlDiaQ0wZ52TJnDSfsdtVnC1XrFYLlvMSj+H5Jy/o+pGXTz7ks+Q1+4dHXOc4XwuiyFMWa2LZsJifc3gA4zuSNGY5XyJ9zLvbd7x8+TL4V5RA2RRrHM7DrFgzKy549vyKu8dX3N3f0laa88syoBvyJcY64ijDuZppGkmymCKfk8Vz1qszPn91ZOgNcVZwqCviLOb24RqtY9I0xUuHV544jjFuwnnB3cMtoxswpgckQmt21ZY41kxWgJiIY4PrHZ6gKAzjkdGOeO8oZzFx7GnqGuXCWkvqAeE7ynmBkwMfvDzjeGjZ7Y6cbS5AJEBgrc1mBeNoGIcBYx06sYxuz9mTImCjvMdMPWW5JClXjIPDE3Aju73l7v6ONM4wxmLdCEAUp+GhTMzx0GEMzOdnTGOHtQ7hJIqEJ5dPTweOxPsYId37lfdkaromJ43DTb7rDggP4zASRQkSFRpQyHGxoe8OzIoPcUPG1CuO+zcokTPYBsQU2HW6oGt6lHZ0Q42cMoRocYQUqpYlxsYYM+Gdpp96BJ6phyJZUuSzcKlMPfcPX3Cx/JDJ9Hi5I0oykmzNODjWTzR5sqAfBOMQlIBITzR1j1KSOFVYB3VnsEeF0gfSxFLVO5
I8pqlu0ZGhnK3CA951SCtCWM9YzNiRRgu6dsDYlv/y7/wbfPziN/nii1e8evNjkPCtb32Ltn8kjiOcjZl6wWev/gEvX3ydWbng7jE8rOJyzh9+7/dJU3h29THj0GGnHqsKpIh4+/aHaMr3Z+IHV5+E7mVr37dy3T++4msffptYp+EsZCROYh4e71Bq4OnVp4wKuqYjdSCznNubN7y4WvDs4msc2iOzLGc/hjapm7c/oygdxXyDMxYhYex6ZuWKu5trmv6Op8t/DqEjvKsRXrOcz2gOR+r6gbPzMy4uP8Jpj28NOs1w40A5A51doZKUpjUYa3De0x9bvDOkWbjUG+nohlC9WuiIceiYz8MwhHQMQ4cXEZFUdMNA1z2iop6+y8iIUMIxDoYyDcq9ZkBKTZ4tULJDGJhczlHXLOYZkxHEUSiJEKdq2KYJ5ISyLNBJSjsMQWFMEjATjzdbvva1j4nTiNev3/LixQvMNOEidbKoKJTS7z/fYa0t319uQz4gWEyEUic2cfBEf6Vwei9CgNRrvPRAUEGl1Mxnc6RUgYfqPWYaydKYyU7EUYRMQm+28GDsSFaW9MMABP5qHMdBEXUiWMKswVkZKn6nCedUKJHwGnxGHIV2qKU454PnX8PaCWOCX7TvBM+uIsbRULf3Iacw7kndku3Bcn+7ZbuzpGlA5bVtT5HHSO9Yr8+Yr1dMZiCOUqYpoKuGsacsS2alYug9XV9xdnZGkhT0w8D5ZsWxqVFSYoXETTB25j22DwtpPOPpZYTwluMhIOHqZs/+MSj1sUjYHw/kpaIsc3Sk6DvPbL5CjoqL5ROMm3hyNqNqK/q+RyOYup5IxuweH8nSgjRKcNqEAguvWJXhub3d77DWni4qHXms8E4yX53h3MT+eKSIS6SaQkOrtmAckfZ435EmECeK1eySaQxMXuUnZouMthmZZXOYzajaPVoFpV84RdO1ZFmKp6cftwi3QAqLlB47QKQ0F+eXtP2ORIekdywdL59fMg4W7yaeXC1I05jjrsGrJ/RDwzDBYTeyOZvz+LDneDScXS55eHhAKRUEqI3leGwY7MB8tsKYlmFIcb1Bnz4bx8PEerFEC01WOmbLgkWb8PkXr4nkjDiWFPPiT36gnC8ukEKj1I5ZuWS73YIYaRuHF448Sbm7e2RWLjCjwmpJHKXcvKlYLp6j044PP/yE4/7Amy92KFkikjl5FGH7hvp4JIkkkcgoi4Jq31Mka7b7d9hOoCIQTiFJqPY9kU4xg6Eo5hS5oasl6/kM53uK5Jzj8chqVnLcDzg/8eTsOXc3X2J7R5ZNzMoneGkQasQaQRrlqGgMsnEnebrJedzeMJkGb+DxLmIrghn95uYGhaDIMuqqQynNen6Bl0eyKMdPEmkijvua880L4mxEeEekS8zkODzccbFaI+SCYdyyWV7y7NydBqSR3MN8GdN2jjj1xCoPBwoxZtDc3dxyrx548eEHvPriLYlM+PiDK8ZLSTVeE0Ur4mROml5heoG1tzy5+oTzixRjLOvlFR9+9AE3928BgY6hrzOyVBIlQd5+e/OG28fXVNUOZzLyIuX65o6mNpRzxWZ9RdUcidKGslwyTcGg/zDs2R3uSTOFE5p3797S2wNd5+maliRJqOvjewUgz/NTojnB2oEo1kQypKfHqSEtwo3cSUMkM8p8xeDbwCyl5snVObudYLXqiOMlXTdQ5A6lIqahxbmRPJ1xbHusHcEHXEOSRDRthVSQZJp4GvC+J4kU1kKalhgzUuQ5XdeE1J0Wp+73jmkM/jidwP32mvlqiRcDzTgSyZxh6lE6pj125POErhqQug2r6l2LcJ7lcokxI1VTQ71ARRHWHsiK/JQyDRBvZyMEhiROaJsDm9XydHmakSQ5h7FByhYpLWmcoMUlSk003Vuibk2WZczKOYd9WIGmeoadcmScMImgIGmbkKYpowmp3K6rMcbw5OIpTy+/ye/9wd/h6tmc7e6e8/MV797e8MHzr/HwsMWNFqETcILzi3PGwVI3B7zw1EdohWIae5IkOQXbAiLMWss0jKFuMwp8zTQpcNKQ5nPM5MkyR9NKmlOtZT8cWC5CGKFvap49veDxYQcu5r/9N/4XKPec119+xte/8YL//f/h95jPZ1w9/YDDYYfHE8clb96+4nh4y2//md/BuVBhirCYKQRqXrx4QZrmHPYVOgnDU3Xs2B1f8dHLb78/E8t8Tjs2J8+U4vFhT13d8+1v/zbO2RAGMANZmrJ7fGC1Ltmsn9BOA1GSokdQ6YK2fsPZLKNIV5Bk2KFhmixGKIRsubpaoJMlXbvDq5FZMWfsBkZTIeORJDkjyWa0j+8wMsOjaKqah8drPv7aLzE6iR06Yh0FoP3o+eEP/zEvv3bG5B1dX+EMIPvAMD0eubgMinw3Du/B9sY49o9bzjZFaCZC4pzBWIXQnqqqGKc9MxmRprNwARDm1FzkTxeQiUjFSBXjoz60N4kILSNmZcSh8sznS4yYkAhipRh6c/LGTQg03gmEVNhpDNWqdiRKIzabDU+ePOEf/L2/z2/+5q/j7IQS4L1lGEKxwlfrZaUESRQTz0rcKaE9DAPtOJxW/SGFK05d6l+VBIxjj7ehzlNrjTEjw9CfqkwtAoGSMHQtQqnglT1VnXov0FoFHE6ehyDX0BMpRawU1k9YGdLozmnwp7pVr+naFqVinL8/XTSDl9+PEVJr1ImzK+WAtWOw48xegrRI8QJEGFhffgzW1VT1NpQXTI6hrznsdtRTjtsNYRMy3OP8SJrGlOWcprKnz0pIvGutw1CnNf1QkacRRVlirKDIM+bzkvt7HwbK0/s9jj1FlnAtH/HeUxbnpzaumKpu+fjjj0mSnHdvf4aSnnEYeP3FIzpWpOUM6yQ319foKGO/37I8mzEv5zxud8zSedhU7Q/vPbR911OsSrb3O7xQlPkMM43M0pQkUUgipmng7HLD+WZBdezpu4hpmFgs50Qy4sWLF9zcvkMnGjsOCGPwWcxqtaHqtnTtgaH3GOlCq9ZQEC9yxmGPl44sTRimnkVRkiU5UZLStw0vX5whhaCvPHHs+ORrH/O42zHL57RtjxQa6ybyIuJ4eCBWEq0H0njN2XLFYhkoEX/wB1+wKDPyRLNaxaTJOd///hdszi44HG5ZLp5xf3+P0iBIGMw1RbHA2oGmchRZyf3uNVrk9GNMWWzoO4vWKY8Pez786HngDf9JD5RxrOk7w2KZIWyCnyKiZGA9n9N1NUkUIXPFNBhms5ymclTDA2WxwI8VUmV0zcDUGc6XF8EAPVqSPCHWgjyZB19JmiJtxJOzFY/7A4IYmXSUxQY77smTBUmSEkUaWwx4O5HInHJZMJoDXgi0nlHmBbOFBldRFAsUkjK9ZJZ7FrMYpSFNVmy3Rz65umCRX0Dcsz08MJ/PcL5BLM4wZiItcqpjTV7GdN2AcsEzU+/CMJVlEV2XUNcSpRVmAM3IopTMyp6+FcSZCOZeOpbLkKrGD5wtnnF9+zNcmjLLnyHFxOasJI5jjsdXjO2SxUaRRnO89+yra3ob3u/f/Sc/IMs1S
aKI4iWzdc60XTFZhxAVCENvBubrgn3zJa/+iWc+L8lLkCp8iKM0ojr2SA/ToIlsiXUVu22Fo2E+WyHijn6yJGnK+jxUCz7sH0PLTA+je6QoZqgkJlYe1JGHx47Hxx1Cw1B7NosZ8dUTHg97FvOCm1dfcLF8QUONpUQKx9C3YOcMY1gvRLI41VrOmYbQYV7VbejmnpdYE/HwOJDoDJ08paoHrBM0hw6sJcszIhXRtjA2AoMIjToGimJOXR+QEryN8ZPCSgtChq7oLgwSx37CSRVSt2YiyyP6YSJOY6q2Jk9zZkmONgnKZ/RuwOGIIknf13giNEuKssMRBTpCP6AiT109hISkPdA2O3RkyZKStppOq6bAoURMRPGSpt8Rq5K+FpRpQZbnDGPHBx+BGedkeYy3OX0rmc0K7rcSM40gesb+wAfPvkGS5WRJypefveHqas3tdo+1EZvNmi+++AKEI400xob3/fbuju3uwOZJQje2WJuyO96Sziz321uEslxcfkLdPGKtY7dv8E6wXJ5hJs/tzY7Z3HO+uOSwr3DWk2YRx/2esiyJswQtU97c/ex0wXNMo0SJmDyP6fqWONaM44BQPfkso+tAyz2Xlyuu39wSpzP+tX/1f4IST7h9+AnxquB4PHJ9/T3+8r/wG7jYMe0iSGqyzPN49yUff3TGxeYCZzxinJCJp2oth/vPWZWSPN1Qtw2Fi4hXKY/bay4jxfnTD96fib0UeGMRk4JUsL27RSWOojjHWIkTgAlry7auWRRztHhOrCRjoxkyg3I9tv0Ry80FvRWovsX0ljSbaFqFGI+QLZEqYhw9DgU4hG1JZc1eJchizVgfUUkKQiMzzXH3gLITx27OpRYwWgZGsuWK+vNrXLsl17+E8SnG3qM9SAHbQVAkj7y4+iaH3jHZjkxs8LqlG1qk77k8Uxy6kno8oKeY3vRMwNjsuVh7vFFUVjEMexa+QCQx98dHssjwfFPS6Jx22JOKmDGegxnIZYurOnpbEs/WxK2lNgaX5rj7A4vUEscKdMLoO+I4JSPDNBHV9sjF+YLqcOTs+QuefvJt/t+/9/v82i9+h2masKZBqxwrNM4ErmMaSSbV04011niiKAGpmWV5WDEbw2TDqm+yE845xnYk0imIcJk0Y2je8i7gwb7qi4rTiDTPg2qpTjDw0WNdD04hsZgxeMm1UpjJghdhxS0dWVqgMo/3UfCT6wgzlcgoRrjsfeOVcw4zGaQPwRYnAzZJSk0zNFj8ezXWi1BnHOkEIQRZvADhSBJNUa64ePIRxoTXWK4CiNx7e1r7T/RTC8LR9COJjujrUNBw3O958eIF9aEliiy6OHltx9B69bjb4oylKAIM/nBoqJuW1WqD95ZymQY0U5IyjQZ7rEnjGWmkSVTP+Uqzr45EMiYtM+pYUpYl52dzmqYhkRHKeryHPMsY+0dyteL24ZZIF/SVYWwMkpTJKMzkaER4hq/OSx72W7zNMQISFbZly3lGrDUvnj1n39xyvi6IVIw1Ct1aNi+e0AwxdnCcr6/o3J6qGsgTxeVFyTQYPv34I+5395jeMIrQWIMAAQAASURBVGpHNEsw+4hEQDzrGQfF+fyC9dOOZKEZ7I7nLzYMTcVqeYmQiqq5J4486+Un1P090ZAyK2Lu7q9JszXWtXz08py8mPH69Vs+/dY36bodzX3HMq94fPdINl/zm7/0dX7y4y+Zzy6p/UC17/BSkC8juv4N87MVbgCbjNTDnqzIqA8jm8uEL998hhvTP/mB8rDb4xyUs5K+3RPFnijOyNMlOkpIs3CTnKYJKSTlvKfvc5SWTIPH2gD4nKU5eZxS1zWx1ihrKaKEyY7EaYBh18eJoT+Qxp6XHzyjqQfQhsuLK7wbkcqTxBFSBsyFGwVpIknTJzgrg/fL9/R1wotnz9nt71ByZF7ESJEQKYF3hmlwbFZrpHLstw0qtkwjeDcipMXbgKAZx5FUZRwedvSd5+wiYxwUnoa+GuiOIZ09K5YI5UBYJqcQzKib4A+NXUbXV1jjkVIxTYDXrBYznl5+yth2HI6PCOFJVhv2uy0qMpyfRzhfMo4Tw2A4HAZA0zcxm4tAxa+aimFs2e5TzCSYzQseHkOqME4tXa2RseHrn54zjQZnA+Nsc5Gw2z1yrGLyPA3omXHP2XJDvhA8PvbIuGcaJdaPYODxODKbrei6ht2xIk0KohheZIuwHspatvctRXZGuQoHcbnMOTzWVA8GlUT0ZiKbfY3Oh+HBO9jVDcVcME0erCLOAAd2Etzf3hBHYY3Tti0IE1YxfUoSxQxTxePD8VSRqFivnrLf76mOLVEsUcrT9wYpPV4T+IBdh0Az9CPCnvA/qQYU49igkxyHJYkEVjj63pCkGiscwzSgEn1SbSaklIy2oSxLLAZjgn9wNIYsSznW74jlgiiJwg0/NswXCeM4EUfpyWdT4V1AtETpqZcbAE+SpJRzgCXCJcRxQqQD/282m5GmZxzdA1I6+qHCeU03jDg/EEUx42R5uIeuf8c0htumsxOjG7h7qFFxwt3bPzhdHFO6oyNL59zuK6yv0ZGl7xbkc4kQMWn0hGO1o+sfkMoyS5/jraOpH1E6AZdQH78kTj3TNFEdIx5vH08A+IS+74mjgidnH3L/cMswdaznC6T2TM5je8PZpqTrK9bzjGny7I4j1gqEVSyXMVl0zhdffJ/f+NU/yy98+z/L2GX03SuKWYmJ4ObdgZefFPzSL/xpzJiACtxRbyV3d6/I85TN6jlt2yKED4GS6shqoVkuy9D8Mli6zhCtVuzurjkeaj795Z9Dfr9i8BlrEU7TtFuePXtGmhZY12NN6Pcex5662XFxlTGbzagPB3IVY7ynrVqKMiFfzGi8BOuItUJHEY+HLbvtHS++9jFCB06uEKew4X7Hu+vPyWZzVqtNwNsQ4dwEbsn9w1sQE8vynCTW1F2H1AlmMDw8XnN2tiLLQvOR8ISgg5Xs95/zbJ3RDw6ZxiijIQuK1MOD4epqiXQ1kSrx7oB1I1YYmmpgluekUc7tcaTHIYSha45EKqEfWs4TjxUpbR8sLToN1bR1fcRhkElO5mYYISGKmGUZWkVU5pHzQtANPYkcyFWGsooRi0gti6xgniXsrKGqO779nU/ouh1/8Ic/5Dd+/TvYIUU4mOwBGWkyIYmEoxsmpEpxSIZ+RHvPJH9e66l16KiOtUJqTewjJgwgcEKhdIz0hHAVAiFCh/zY9IxdQMztD/csFquTPUiitSJJMqZpel//mKTBFxonMaM1gZcJp3W3AzQq1u+ZmIpTVapSCBn+PSFD9WhZBlSOlBIZ6ffVq19Vsb6vRrX2pKJ7jHNMY88wDIzeM89LFAprPTorESesUhQpJOIUUpqYzV9w9YGjqo4kq+AnZ5J4J2mnAe8CHzRNYw6HA7v9ltWiYBpGjvtH8jTDSB8yEsYgjcdHHVfn60AVQIWZo1nRDyE08/Rpyd2NYFGu6A5vqLY1l+snTH1Cmg9Il+OtIyXjuN2Tq4iEiChyNMd7ojJHdTk+Nrz6yWvOz8+5uLgg8ooffvY9jHEs
iw22nfjxH/0QtOXq6opZOadYrlmeSa4fe0gUv/yr30CYhP3+mvTDKKAS7x7ZuYlFCgc/cXaxIYvO6GjwZcRqViLkknpoQO5Znc94vN2zmb2Eoca6Aw/3jo8++g52anFiy9AeiHXG1z+6oG0mnsefYGzLuvw2Q3xkvgm4s8vzD/nZZwN/8S/9Ng+Hz7i6/BDJgq99/QPS+Jzbu1d0g6apHvj4m1dUR4kWMdImFAUIOTFblxx2j2zOz5nNc+LUYKboT36gLIuILC5pj23oOU5yytmSvg+U+nHy7Hcty2WJM56r52vsANfX1xTFmqdPrtjvt6c6JMU4BjZZURREkeZQ1xg7kegElxmUTABJHud07oazxYqhG+m7nsWsQKuYu7sH8IKz5RWrM8XQad6+26LTI0msOD97yjhVxEoS64Q41UgR87i9Jcsl3kW4UVAPNTpJONYVWkuUFPR9YPcdDxVxqRlNhEwi5nlCM90jZAp6pK33lPmGsb1GSUXXg/OKpj3iGamqig+ef8R8VmAmgXFHxl4iIhinPbc3Ds9AHJUBbqqhrm5xcuDDD19Q5At21SNtA0k2Jy0ytvtHsiTH8kiRzSmXGduHCi8lo99TtQ6pLJOvUCbHy5ayjMlLj5lUaBAaW7q+Jk4UH202GJ+y3x0RegApiOKYzdklSkOiL3G6Zxg68t7QNgPL1Ya67kLvshX84R/+AfP5HKknlMvp2oq40Bgzsb0/IPwc52qaW0leDMxnMyZjiIoMiEN5va3RScnQGiJpQg92kSLlQKpWWNGHwJAJ4FipR4yvQXi224ZyljCOmq41RLEgK3IOhwNxEgbXtEgoioxxnHDO4xzM50uUTOm6ms1mzeHYkiQ5T5484eb2FVEmcYNlvVnQti2Puy2zWckwhAR8CODU5GVC3exwVpLnBQgVfJFDAyJBuGCKHjtLnq0x44idBO1gMeNInGq8zVEyITUjQvSAQGmNjiKMsUyTxfuOdhBcXjxjVixpqyOPD0fwBe3xGBKrztAPDc4Z9lVLnudcnn/IZA03dz8iTTXephyqe5z15GKBpiROwkMriWK8DQ+gxXyDdaf2n60nigV3tw/EieJs85Trt69IxBGc5Osf/Sl+9vmPWCwzjvst2s/JVE4UGZSdUOLkETMDz59fYaYHzLQlTRYU8yXbwwNpFJMsk8D9nDwSS55qVpsFTdtTznLy5ClfvPpdfuvX/wKx/0X+z//Hf4+/8V//t2hMG8oN0jVfPn5GPx6YL875/7D3Z8+Wbdl9HvbNufpm983pM082NzNvWz1QKABVBAESBEn5QZZCZIQjFApbCtvh5g/xs+0XhR8sW4JskxIfKNmkQYVJAkRTqPbeul02J/P0Z/d7r36tOacfVuJCfnBYES691XzKiHsy4+bJs+caa4zx+74szWlMhe9HJLsUSxbcP3iM58YoVaKbBt/r8ubFz3BFTRQdk1U1CIHlWAhh2C5u8eIY5F+rFx1LUDcapEVZKN6c/YLBuP18FXUbVLClzWazoqgW1FUPpEBUDVgOtuWy3SQEgUNSO1RKEfsOlaqxlcfi7go/sHDcKULaFOkOO2rQOuDy/AojMvrdp7hOxGq3bhmzEoqiJCtXFEVJrzskSdeta7sW2Ahmizd0enEbUjQ1QhjKuqDrj3Ecm3S5weq/g+todO5QmxbxU6sVtl7huntE7oBlknyF6imLisNRiCUklhWgGzBVBdJC1UW7rtAsucXFGVoo1ZDnO4QT4bkuWbMhVxaWF2FrQe2BNAZPuqzYsljNCToPkcpgZI3tOginzya5xBIbbKvAtk7xQ4/Nastv/9YP+KM/+hf8/JNXfONrj2iqHVEToaVFXhXgwnDUZ7utsGVbUFsYatp9xSytsIxuPfTSplYN0raI3wLjjbDejnwdHFsgMGBan73tOLiOA0YyGk3ehoAUUkLVtOxmtCbLEoC3el/Z7ph6Lk1ZYAcurtc+nlWjkMLQoJHSoaxrfNdtwyZvx/JKqRZ9o2pct93LVEohMai3TFCl67bL6jhYUrYYI6VwLAvL9yGKqFRBVbV3W+x5SNlyeG3HajuytoXv95GO3f6dbAfLdhHCIktzkt3uLeezDau13u4Sx7UYH9wnsOy3qxEFZZbSJAlVmRKGIa5lE4Sd9mel1tSVYrfbYds2o9GIbJcwS9vEteOVfOvXPmK93FCWJbc3a+q6ZH88pahWnBw+a01elodWNsfHQ16/fk1gB1ycvSLqnvL+33zAv/7TH/L5jz/jm9++x6PTR4w6A+6uz3Bth+HQxfY0qszxQp/Iihl29ki3Z+wdDvAcC6TFYLiHH/ooe8uSAcqXDCLB/YOYynKo8grbH3K3uOP+w+8QuxWXr18R9iJ2u2uGdpdIdnAGcP1lgTAxn3/+Kc+efkAQnHD26pxe0EOVhm6/IHAnpNmW3XaO7fiY2mP/YMLr8y9wHAff92ElEMbm8HDCdlXx5NF7hFHNR9EzlsuCs8uXWMrw7tMTXn5xzqN3HrFLb8lTw+m9E7qdAYvlFcO9AEuGv/yC8v7+iGRXYnkx3qCD0YIiV0gKTk/HSO3RjzZk2Zq9/WOO9ve4vrrjZO/grSVhh7TLtoVewN7+ENuRFGWJkRX9nqQqYlzbw+tqpMixpMtuu+R4OmUy6JClG6LDKWVZYruCw2EXKSXr1Zwg6GLJhgcPW6yMFA5luSUKXXy7HWH7bttm78T3sB2fokxJsxV1o6nUmigMmI73yLKCylPcztYYnWELF89raGpBUczo93wO9qfc3t7y7fcf8YuPX/Po2UPiuMv17QVBx+X2OqAbP+CLLz4nS264vb6h1+vT6QlWq4r79+8hbZvtWtEoi7R4w3BwjNIZnhOx2SW8eP6a8fgJP/zxz5jsO3SiKUl+het0kH7W4gqEAVEinBIlU4aTAUrBcBQjrRDHcZjPWtVbC271GAz6vDr7HHAQxuX6+pK0sAiCgNV6xUZk6EZgOzAa9dlt1+zyAssS7HbpWzRDm0wM44iibBgMJjS1IVm5uJ5gl+5g3ZoOMB4FNzjawXE9SiG5LTOkBDs1OJWioE3j+f6WwLfJ0hKtafevlMsmO6ffH5FlNdIW1KpAYLFardnbOyDuSIoiw7EDpCXYJgndbhfHEyTplsgbIx1DXmYILCzbwrUchGzxTz4u88XqK0PG3bxNa69WK2wbtErIsgzXdgjDiJD2oizLEoSkqEp8L0Y4bsuHpGG9WqA1OHaI6xXYlkNdtngQ12tHjMooyrKmrCrqeke/v49ShqZJkEKijWG33ZI6DlLGCByKMuH6+pamzomDmH5nynw+R9qGKOhSVm0oqFGCTjSkrg22I7m6mbG/d4gWmu26wVIKz9Gk2ztOjx7QNBUvX7/k6DikrjMGgxCMg0UAJsPYdjsOlAKDoswLBt09bCmwjSLdLrh3NKXbGXGyf8J6c4st2gdPbnLisA8ovO4AV4IV2QjRw3E6zHcbbNcl7HTYrlJq1RD32uBPtz/Ety2OjlMC/4h/9a/+NT/47r/D6cHv8R/+h/8+/5P/6N8n6tjcXpQEVgdb+Ly
5+AxExGBwQFat230zSzO/u+Xy8ktc7wnmnqFUObaxUVaN/qsusxpTlxobTZlXWFlCUa2I+mMG0+Ov7sS6KtFaIX2XYlfQFEuMOnrLOWy7QJaw2ewSkCm+e0qlDbZojUW2F5NvNmhvQ+306EUO6Ib0bWggSe5wKLGsHsvlktCxKIqaKtuSZjt6fZ84nL5l+YLWCiFssnxDUa042H/QMnNR7cuzUJjakBQLtK6IohHz7ap9ENg2daW5vn6Fvy0ZvnOPqs4pmg2irunLAdeXL3DESzx/D+OVrcnFVFRFhS0ks9vnHI4MWRW/tZZYpElFGOSsF3N6kxRl+xRp0n7OLInvQpPXjKYejQSlILBABgG6aqA2uH7N/ftTlDlA1QId2CANtlWjCsVwGCPtHYG0AIlt2SRJyt/5u3+Hf/SH/4hPPzvjvadTBt2I5eoWPE0hGsoyo98fU2QZ2pLgxAxVTaMNQS8mSbZ4odfibXwHRwrKxpCVGQgLN2xHgbZvIS359qdCYjDYrk1TK1zLxbLaYXgQxBjhIOFtoEd+taPZoqlAG6jsCse1MapFKmkapLAIHIHj2l8Vk3+Fc/qrsXZLXmkT9nmeA+2eZ9uxbL/WdW2kbIM/f3XXOZbdJqilZOL3AI1jtzuarmVRVVWr6/UsjPFompoo9EjzkqasyOqUugTp+KS7hDjutsEQO6Sua3qjCWXdJui1qukNezhS0OmGWFIjpSDNdm0hLQAp2vWmdMdgUpKlG9J8Sdh1CYTNfHmFSnsMhxaNVvi+zwcfPaCuFIN+h7wYUteKwLOIoy77k2OWq1sGvfcY+ZLio8fEvkNa1/z2b77HfHXH0ZNT7u7uEMrw/rNf5+LVOePxmMGoy5vLK7SSxKFPkpaMwg7peUZuwcnxHkYX3F6saFwHL7aY7h1i8pzVTYYyDZQ2nWGfKAhJ31wyvNehTi2Wdc7+ybtcvrlGdnxevHqJFEcs849bJujlhAenIzStbU3IF1jikKvrczrhBMddEndrHNvl7HnGyaMhb15dtKpOo4n8PcJQUmUhi8UlJ4ePSDZbpuMOVV1jH0kcW/D00TNM5WIhGA19HGljGgfPCekNA6TwfvkF5fF0hBk7bLeaTXJJb9BjcVcy2u+jKPAtm8lwCOaAKAgpC8O0H9Pp7OHYBmUaLi8bsGK6/R6e57DLUixbEQQBHgG9vs96c0fT2LhWj7xIGA87TMZ7rO4S7p+OiP0utTZk5RKtBHt7B9zeKcLeEWmaEnccykJyc7Wi2wvwPI9XL2/o9vust3MsPFarGZbTMBiNcP0QRYNPiFYORbWiKhNOH+zjBZr++AAtG5LNhr3pIXmRsL+/T7Jt+Pa3vsbt3QXP3tvDCwWCil/77gfM5rccTCfMbzN+67tf4252TlIqTk4H3N0kPH2vT1luyTKF41d4dMHtUTcNYRxwe31Df1JTqYzz2x9y8qDPennHZC9jcjTgZz++oD+acnh4QFX41GpNqc945/H7FJmgEw9ZLTOGo4i4a9HvZ4wHh9zNXuP6Hut1uydqWwGffPIKAN/xubm6fHsZCO4WO3p9lzyvuLm9xnNDgk6v7YI0JdPpkCxLcC1Dkpfku4ay2uHaHRzpU+UCY2yKphXY22WMlAWOX+IYnyZNcJ0RaZORkVM3c0J3QFPllCagqUDpAsu2aeocjMFo0e5PBiFNU6GUwXM7qKZd+LYtH6UUUccFEVEUGQjFeLJPXSlsD6qqoqkNNu3FWilFmmdoVb9dnG85aJUp0Ar8IMSxLXSpmQzGVHWNMG06UmkDxiaK/RZirQVGaTzfoijaQtG23q4hRHssFwlaS3odjzRZU6mGqtRMxsck6YL5as4uqQgTF7Com7odUam3HRNZoDUsFglxZDMed3CtLovbFs2T5QmqbNl3B4djym1NpWq0hlcvLxgMfQbdCctVgsUWNwjIdglPH5+QLHe4rsd3vv5tyiqnqDL296fMZjPq2uDHHcqqwbIE/fGYzWbF/HbOZLSHa0nwchzbaZOpeYrjWASOZDgc8/Llc0a9kzaUU28RWqKKEOlaCGWxf3hC2dSkeUqdVTRlgxt1sTB4EvKkJm1W5NmI9e4z/v7v/s/59kf/Nv/x/+F/g+0q3n329Xb6YXlo45Cla3abKw73HxD4Q4rimqYBR1tst1uELOjF97F9i3rVJqeX6wWv33xBL6rZ6/cxokXhgKRMKhbra6YH91DS/euCsszwAx9lYD67RoqaIJy2APBcY/ltl3c+W+JFkjCcUhuJ0QZpNEYZkuSO5fqW0fGTVnXalEir7VppvcPyHLSxEaZGG4OpBbbr4fmwSwv63SM06q0lxSHwY744+wQtS7rdMUEQsdjO8F0LgUWepLw6/4T33nkfpe23VpgKWwrKsqap7lC9GMcOWOdX4Fp40mOzLJHS5fDeIcoECFFT1ym25RIFXXKx49GjHrChWlkYv8KzbbRrkycZg4HNR998xF/8tMb2Na5jqLWLKQtWiznd4TWd/a9TlC62UJS6wXJc0tWK04dT+n3B2VlJb9Dq4lwsLFujy5rvfuOIbbPly7Mu2pG4pqIoa3ZpQhRp6kLx+mzL6ENJbHKqNCOOQ8IgxBUVgx5cz2/pRR7TcZfbmw1R7NM0cxzHML9LuX//BCMKiq1GWOatUUfR6HblyLQNSjAa129DdVHcMnuLovV6J0mCZTlfFXK2tJDS+mofsi0o271MpcRbzFEbjlKi7UCWpfqqG/lXx3Vdqqr6KrkexzFCGHTz3/7aNlhkRLtO4zititUo/dYK9xburgW27SLa7Qu0I4l7XaLYw/McAml/VQyXZUlj2qR8yyfNOBiPKfKyDS/WFaFn4TmCScen1+sR9KPWWvQWD6SVaMOLWfb2bnZASpqmoatrDG3SuSxTiixFKE3op6RpycXlGYvlHbbs0u1GYBTnl1ekSclg0OPw4ACNpqwvUI3F6ckx8zRFC0lebHn54oIPPnzKyekez8/XvP/0fV6+fkNaFnztW9/m9vaOz758ieMG9LpjbFtycm9Atc1p0or9oxG2CNhsLnh1/SXX1zmhG3N7WTPqjzCmy9G4T5M3lAYmvREjmfD67II4HJJXK15/9gIZGoxZM4xDXD9i2DzC94eYaoglE1x3jmpc7uYXoDK64T0Wd9f0+jZX1z9msu9w//T3yLMEP3AJfQ/kgI4zxaYBW3Dw6ABb9qiyHVmVcnTUyiuEKBFSU5cOTw+/R+javHzxGtt26e0fUtY5i3nyyy8oPTfAshxcxxDEIZ1ewP7+PmHkkBat4qvdTxzRCT2KosF1Q8KgfeOpspz94Zgg7jCfz2mU4fToPtvtlrzK2/FRXTPq7eN6AiFKtO5QVQrXtjh6uM82ueaLy09x3JCs2vLknQ/YVCUy8Hl1/pzT+4/oDzx+8clz/NDFDwXKlIT9EqwK2y9xHIGvGmy3wNhbgmhMfzJA0HBxfsf7Hz3BEoY03fHR/iOEbZEkmk43JoxsuuKANF+j6LJJ13hhxHHXZbk5wxIdhLCIgjGb7ZzTRzGD3pTyLzYcnAbEPcnd7YzpwZD5PG
Uw7KFFQuDG5I1DXqRYVoMbu+wdDEm3Dnm1oShyXDmg2xUcHx0z6j6kPxzghTl17nJzm/GDH/wAxxpye3tL4EZ04z7rzYybi7plVr3d5cnyHX4gcJwYdMRvfu+7zObXgKIxc6oyo9/vEPgdsnzTjm6PBem6NXD4nmL6+ADX9Vit2ovkYDphvloT+ALLKrFkSBjauH5NVUaUVUPs2hjH5vVZymQw5N5JQLI1FGqBH3tYddQuiTuCQb+DFILBoEddQZalbNYZlqPwA0NZ5kjRQtGj2CVLExzHw7U7SMuQJElLATACISV1JdCmBuyvTBib9Y4obgHBaZbhOW7LYWzawiwIw9ZokFYMBwOiwKExDY4jmS+XdKOYQjVv7RslcRhjTEmabSlLC8+NqQtDZ+jjWILXFy/ohAMQmrvZNVHUYbde4bouq/UM15KEwYCyzrFtD6MNlrRatp22CDyPqiqpipJhr+3Ml4nECkOgINnUDEcjilIR+wHb1Y7+oMfZ6yt8L2Q0PKAuNyxuFigEg34L3he1QdcWnThmt9sRBy6R57LaGKgF3bCFi9u2RMoI2/JQqqbvdxk+GJKlW+q6wnVBlxZRHJNXObPlml6vx+3ljsO9+ww6Y7KsoBtO2O42+F5IliVEfsD8eo7alThYxHGX3niA7zhoVSL9iCzT4EUkScP3f+sfcLL/61xcvub8zTX9QcTh4QG7tHmrMatZL7csV885ffL3qXU7EZGuxhEud7eXhB2f0fiYXZJihIvtKMqk5mZ5getNcAOfpM5RVY7n95nfrJitrnjywfcQ/60HuaZNNnd7fe6uz0Bqer2WpWpZIUqVmLqkqhMsS4MIKIqM0LWRwmAa06oZHRspfPJ0i5QgfIv1fAZyR1YaxqMpd+sLctUQhjFJVpCWM4Tl4Qd9iiJF2BZaNzhCsktWSKeh0xtSVi3ypi4z/GjI9eVrdvkdg/HffrseodG6QVohu3SNJWycuIcyFXWlcKIIVMN2u+D4JGBysM9mEZOVOb4jKZXElCWH+wMePQq5Wwp46WOZVrerBawWCV/77X3CvsUmTRgHkt1ui+P08KVivTznnad9ZvOMwG8gsPA9G9eJyFdzBn2bbV4SRiOUZYEpcB2LyvIokgRHKMpSooyLoETKloixWRR8++vvcHxvwh/+Z/8N84MRf+93/haf/eyHRF5AXiiEL0jrLd//3lOqtOTi7EseT+7z6vVrnuwP2Wy2HD7c5+HpIReXL9gU17z7zlPWixXbZMEg6DIddLClQGmFMhrbNEhdIo0i8lxq2yLuhFSlwaii/b6/ZRdKoRFYhEHQjqKddoytlXkbiGlQiHYSYhRCOLhuu/Ljux5KKaqiLUK1MTRVTZlnKF3j2e3um7GcNqUuHRTtzqKUEqP+ek9TiHbqoPwSq253MeM4JnADwjAkikOEMLhOa40BCOLW5CRES3TxPA8/cN+C39tUfJplaKPYbBZtIrzW7WhfNswXa4SwKPKKsq5IkgQsSV60L/1COFhO23FFtQYkZSlE0MGRLoPOu0SDp0g8hFBsdtc4TUYke9Sm4eJ2QRDapMkCx4754sVzws4Y38u/ern7N3/2Q5Zrxf5ohCUHHIwPubu95JPbT4jjmEf3TtlsE9bzW548fsRP/vINoSeZTPv87MXn7NZtGEiZiPeeTihWGZ5jY0jxIgtkTm1B5YTMCsXegeGDw28htaRMrthtDN7AZj4vOJgMyFTCcntAP4ohtAnsPXpH+2x3M4b3fo27xYL9yRGO71AWEKXf5hcf/5g4/JJvfOMHTMYzbi5vmYyOWc6uWS4Ve9P73Fwusew5tmPIFxV+1+XB6UMur3/Mdlly/+QpTW4zGNyj19tiu4Ks2iGxcez/HkbedihbiXyxZbjXpdYN3Z7dkuK1IQx9tBIICrSlkH7FcrsmydfEUQ/LralrTbZZEXU7OI5H0+Q8eLDHZrPBCTrc3NyQZmuGnSFJonDdLlW1RjgltZBklcKLQvrDEfUdpFXDze05RbYl8C2u7l6w3AQEHYcodri+ucF1XaKeAbnh4Pgdkt2avcMD4mhEXec4XtMaBZyAXzu6j+1pNqstSrjkZc1uucKWfdI0wTQWTZ1xdDxG0uH86gWD/j62FOxPnnE3n/Pq4jP29vd5dPiQ7SpjvU158sEDkqJhtVrx9a9/nazYANAb+EgZAoZJcIqwa9bba7yg02opiy4fvv+EL1/8hKenhyhdUpWKMDDkWWsxCDybe0f30IXHutgSuhMavSboOOx5R7x5c04QRNRlQl23IZRa19hWQBRGlGXB/dNDlvOc7333gCyvmc2vyPwtQbyP50ZoscSSFRIHy4Ug9thtczr9DhN/wuwuodsXhGHYBqh0yYPTEcpsEaZLXUleXJ7xwf0+/6v/0fv85Y9+TiamfHq2YL/f497BkJtVzmbRdmf6/RFNVXN4uM9qtWE49vHCijAacTuT3M1nRFGIJGrZYoFAa4GQGt8PmC3uiGNwPQm63e0JI4v1ctOCsF2P8TBmsbxttVw17LYrel2FYxs8L2zTzl7MJi/YLncs6ho/CnB9j7KqSGlxMdv1lr2DfTA2u/Uazw/I0oLdcsN0OkXohkaXb1OOhl63R1Nq8sQiDvdI0iWWgJwS1w/xQx8/FW0B8nYZvt/tcusUdKMpczXn8LCPUXB1dUVZeFSl5uBgDyFrVFO21hbVsFtvGXXGJFlNupvhWC5x5GGkYbfb0uv1cI3Pdp4Q+C6PHzxml2wZj4f43rB9iDUwGY7Ja43jpghjoZG4QQeta0y9putPCCOHqqrxA4fQ7b5Vpd2yP51itEVVbcFY7NYFruvQiXwEremkyA2h67LLUvJdRa8XI9E0taYRDYPemHU15/f/zr/NtP8dzi8/Z9Ad8OXLT7h//z6B32e+ukIpxaDncnnxGqV2TEePyfMdILCFpCgqFps39HpdjLQpGoV42wFa384ZTlwG+/to0WDqBoygMZr15pZ7Dw6IgiGqLv8/7kXn7fjxbv6a6f6UKO4BNUoXCBRpUpDlM/aGQ6TjYVSF7YSIRrPZrFnvrjg5edCaNkxFpWo8bbOY3bHYXLM/fkaWZdiWRfU2Mb7ZrLi5e87BdEIU9lgXM6QVYNuaIs3Ybq/wPIfB+IBaNTi2j6rSNvxgKn7/7/0NhvEx23RLWZZIS7d7b5S8/6GHZEpWpUjj4GIjHQuUxWK2JG/WdMMHuLZEKbAsQ6UU477L8f0x/+rPnyMYIzHURuOFHmHikZaf8cOfOJTFfdJkhzENwim5uJjz/d9+l+9+9x7/yf/tFZ1YUqsKUWmE9BDCtOEE2cOyI6SwcByfot6S65z9kYvLkOUuI+hW5GUBVmvSur2Zc9zbcO9owg/+5g/4p//FH/Hetx7y3g8+4sXHP2YwHXGxvCIcRgSjPov15/SnEdFA86x/RCfuc3fnEHo+0k55+vSUySDg/skhX36Z4B9MyfOSbiXwHIFt+yhteHwy4NGH97m8vmG7q/GCGMv1qFxBWSikbBE/vt8GPqHt8kHNdlN+1a0UWNSqDQEp5FsIu2oNL17rg
7YdCymdr7qMFgLXdRHC0KiaqihbvnC7tklZV6i6xEiJ7XkMei3jsLX3GARtaK8sS5qiIq8FeVqwXe8YDHtUboHWNZ5vvdWAGrabgiJTFEVNWf7VOL998fJ9/y1Tt+3quiJC/hXD+m1R7TgOo16XvdEeWb5tqQhpQdMYHNtjqWuqRpFnO7TtUlRbwiCmako836ZpSqRw6MTHBJbT0hXQINqXpU58vyVrIPB9SVNuub1ZoasU25HsH3WIQ8jKK3Szj+84LNZbPM9jOBzj2CG9aEy+LXj0YJ/56oa8qnG9iMFEY7kO+8fvMB0H1EpTNxk6N2ySiji2CYKG2rJ4+s5DxErx2YsvcWMfPBCBy/H9fRyZYokGOy3Ze3CP6/kdeb7EMRFGhRwOD8iyjOP9iNi36PWecXt3w4P3j3n6zjN+/vlP+dM//a8Io4Dp5ABThzhWQuArdpsZSJdH7xzx2adfkmUFbmR4ffYl19cXRM49Qm9AVt1xfvMZruuy2aYMJkOubs/pDWL+ux75//tLfnV+dX51fnV+dX51fnV+dX51fnX+v59fFZS/Or86vzq/Or86vzq/Or86vzr/f53/ziNv7bYpVp1VzNcJfuRzdXfBbHbL5GCMR6tP9MOKu9s7lC4RykfaJat0hnQMdVEzGIzZZBviuMN2vSYrdu3ScbEFO8PxSpJ828LEdwuE29Dp9zm/mqFKG9drsTDT0SMWiyuqasH+4RQpTcv/8wZgFFWdMp3uY4TGtsdoLbGk5vGjJ9zeXWI7NfP5Ds/tEkYOti25uLxpIeCrAtcxZHrGYOoxv71C+j5u7CEbzZvbGZo32K7H9fKKKApIVyucUOF4NkoH3M0KlK6oig3kiroyhJGL0g1VnXJwOKbINNoUDIcDVJ2x2y7ZOxqQ7rpU5QV7exb5bsvR+Cm2l7Nd9yiLW7pjSZGndKOHZPmKJCnQZk0cHHB5e0bU1TSNS12VOI6HwMJ1AvIqp24q6trjycMPeX3+Gav1NZ1sTODutbutTclylXLyoEsUnPL5Z89xA0le1DRViuva6CRjudowngy4vL5CmJjesIPngyGgUQVJMacXT946bzX39ib0BzWNXfEP/4O/wx/+56/xJByfHmNUDZQo7SCtkuubM3ynz8ef/AwhNN2oR5pUQMluWxL6I+pKUVVzfD/CtmLScg2OoqwaxpM+jUqpirrd54xsLGFj4WBLD9+J8X2fhVpglKQTdSnLEs/xyfIVOq0Z74+4u72j1+3QFIpc1dgyZLW8I/A9dKPRTcV4MGS73pEmObtdyqOHT9gVC6oqZ73c4LoS25H0B8dkSUFV5QjZctwkkjDyCAOPMrdwXJesSFFNy7Mz2uDYLlWZohwXx/fZ24uoqxt8Z8rx4ZSi3HF4eIDWEj8I6HUN19fXTCat6ccYl6h0iAY9HKG4uVzT6w3o7Ek2u4zR4JDe/Ygyy1nOlhwcHCGNxpWCoOMT+wFGWxhVkO40oWvRqBmu7WOaHr7TQxtDlqZEwRCUwbEltWrwvbe7uFlCXuwQODQ6o+tN2K0rthuN0C0OKYoDXEeTmho/iNCmdYmHbkRaZfzGr/1DusF9zs6/YDw65tXLT8nSkr/xg9+nqjdYUoMNZQp5Nuf+0WMCP265pjJHMGSxmHG3eMHJ0a8jbRdtWmWnaiJWdzdUaotwHYwAoWwaoylUzmJ7QRRDrzeiyP96QV1aLgpDsdtSqxwZdBCW00KmdYErHdIkJy+WuPYBvu+jdU2a7ogcjyxJSMsVTfOIvGywUUhpoRpDkSXkxZbJ9ASjG1RVY9ntHttut0GTUpVDhHRQKNDQNBpTNizW53Q6DbbToWxyhDBI4SGk4uz1c0znjOm7fwONoq5LHLfFyuySJd/7wbd4/bOQ87TARbCZzZBxTJ7dMTxYU5YCxwnItnmbZrZtjNFs0zWff3mDUhOUqBBNjRQueVEiheG9D/v8+Z+CalqntlEVtVxTVA2ul/Pxz36BaYYkuy39bguorJrW4jOdDPji0sLBwbcbtCWRVoBIHRw1pxIhjTdCFhaOrTBKoVSJlDnvvzfl6s0tw95v8MG3XvK//d/9E/6n/7NfZ3oyJHJ9TNBhsneILg17hwMWtynKzpiM9uhGU+Juj2HPZTGfsV0n7O8d0jQNg14H2zI4jsJd1kgLqqrF/xyNfdKwYuNlqMrgBgrLAxB0wgFZ3q4rKVUQxy51XdIfdlBKsX8wfgsub9PVWVqgTKuFrJWirpuv0uFA+2vdqgCbpmnZzha4rgM4iDiiaTQCCUh007InlfkrFqVmu16xMe2IXDcO0mrH1hiJUhrzFntkWRLH8vDDANe16XTDr8bbtuPRcQUdWdLU7Yi9LCqULiiyog0wNQ3GSmlUhdBtGBfderiLuh25W3jYjqTXG2DbkrrJGe8fk2Q78irHpJpGOeS5Q84GrRwsMaLRG7A0SoJtVSDdtwi0ANuKGY1kK06JLYTdwxdNa3tSEk9WpFVGsl5yPS9B+Oydvo/vu9xskjYQKgxlnXE8ntAZHKEbQ2SPmM9LnE4bhvr052dM9o4QkWZ9e8Ogf8De4T0+/eQznrz7lNkyhyyjNylJkhrfC3G8PndXG754cUEQD7g/HtLUJUHgIWRNELXfIztMCCwPYwSb7QVQ4NgalQ0wFIw6J4RPDtmlKXvTPrtlgdBDHj0KuLq6QmDzi49fEgY247FPGIeUhebXv/1bXF9fM5u/odE76trFsQMuLlfYgeTk/hE///nHv/yCssoVRbYgjByMNqzXt+wdHNKJT0mTAuEYtrs5q11NrzukLDyKvKQ2ukV/hCFx1GezS8jLJaOj93CbCD+WCGzuFldIKRnvPeD6aoYdgt+L2G0zbuYLilrj9QSuPaLX7XMzuyOOBnR6No4Lnj0iywp22ZoHD084O9tycLDH2dkZ8aCizkP6g4Af/eJfMh4dMr9bc//BI16+uCSpHRzjEfY16+ScnBLp9MHkLNcFftRD4dFYFavtmjjqI6VDluQoXZGkNbWRdMMBDgVlc8N6ViGEheM1xNEAy271c7q0GHVdXHuI3fXZpvP2AhSSo8P3SIoFTrBkdTsj8LtUjWEw9NFFl+nApxtOWC5nGOFSJhndYMAm/xwRuyzXnxOHA1bbBf2xZH6XU+Yp6yhGN4owchkMxvz8k5+9tR5FFDk0ZcnKesn11ZwwjlnME7abkjDMWsboLMcRPp24JnCmJOmGYd+jSFK6/pDldoYlBmyXCsuu8eyQ5e0NFB0alQOgRMF6dco//i8z/uP/4z/H8jS94QFJolkuN+hGY0sXU1lUVOg6oRMP2GULdtsdQriUeYPRkiTfUVYOOLBJZmjt0w0NSockSUVdV3SiHnm+QIqEwBtT1ZBn4AU12qxZbCvcwGd51/DOk32ePv4Onz//hNvZhsHEZbXdgmiom5S6FFh2Q2A5SPeAuknwPYd+36GpLWLfh/0+y9ucIl/RG7hkqeTBwyOC0GWzS0ibBsc3rGcLpoN9tNbczl7T7e0zHPZZLySDASyXBlYNgrq1WwiFIxpc22G3u6LbDWmaACUbkvya
VqpMkI5/aKQa3NdmuPwE85u35HrsbcrlessgRRL3B9mefPr2k1dvGja8hFylSmahlcnL6iUk05HX5J5O7RbX4XUQ0RJR3SnDgv+PjRR/y3/+X/ncMHtwT5nGZznz377xJFMoomUpCziXWXm2aBzU5jngv0t0Qad3tMohZpklGSU1CyuA04uFNFEjWcZUmSB5S5SpZlWEaVp0/vsJhmIKgoikBJThwL6JpCrWZR5CZCbm6A30VBEhR88v4jksgjKyCKIlRJJhdC4jilYdu8eztG1iDNYkRBIc9KYi+i1awRRfyqgsgnjmPC3GfurWm327juGsfJSIscURRRFAURaNYVLi4uSMM6eZqRpSFFLrBeubQ7Eo43wgtyJEkijD0kWURTJZz1hCBaoMhNSqkAIUEQVQShRGDB5GZNkXZx/c2DcZwmmwlEMGerX6VaG+B4AVmebM5sSSV01zTbAgXgxxmiVJAlm/RnGqUMmgbvPf0AVe6RxBmCICBJErJqMFuO+PlP/hE3wzkyBlGxppAKdEMgCwJswyALDSTRQpIFkjQgE2OKssRuSry7PGc6F0HKkCkQhRyElCJXePB4n+3uE9ZrH1mNN7eQSUGahdTqJbP5kBdvRuSCRJr5pGWArOrEoYwo5aydKafDJSgSSZ4SpR5JkhAFKf2uTbMxICsMVM2kLCTiNCRJEnRF5epqyPBrQH+aRoiChihKGJbCb/z630QoagD4SUSlqm2arcwADP6r/+qfE4QpSZ4gKCJp4eCWKRdxyuBol61+m25zm2JR8uTJp4hFh/k04fBgh9H1Gdfnp9hVFVMVqCgKqeewbXchybh894YyDImXAUe7O1zcvmUynVKzLZIkodEpqVoyfvgWUxMpBQtDL5h7HtVqTq1b4zufvI/SOMPzPFrtGnmusLN9h3bXRDNiSnLyVKbb3qWtqviLc5z5kiSR0bUOYXrJV599gTf1KKQQVWtQq+7y5OEPyIo1ojjBXw8Zna25uLhi9O4SI6+jV3ocXy0ZTQKccM0yX1Gvb2HXdqipKpPLJYpsodYVssxiv/OEFIWLq9f4boBRqBRuyMC2UbSU2TLk7u4d8sghTxxmtxdIJBhiBVO1GHRV/id/8z/l4dYWn37YYKvd4XD3CQ27SZ53ePzxFm9PnrGcuYiCSaPZJYgCKATEXOHJg/c52L1DFvssl7eUaUarUSVLXCxTQiSjam2TlQmf/fwr7h0+5MnDI8okw1Isntx9RLWhEqQ+yWoTtpSEDMuIcLOQeqfLbLUmjRMazTampFLf6pKXHstgyHp9QSHoCBSkUUIcC6y8KZcnNyiSjxNEjNYJct3g+7/+v2D3YZNFIPFb/+k36RzsIDUM9L7MYiGBJPPV8RdkikVCxtpfcutdICkyo8kKZ52SuwHRdMj8+h0np6e8vpyhdpo4XomzDDk43Ke9b/DHf/byr7+hDH2RTnuXxXxFnpfMpg43NyPa7SZXl7fUGyppFiDKIapWkmU5ltnArg2o1StomkFFb3B4OMAwTKpWm5OXDqcnZyTJnEZ1hyTaOLqTZEUcRqiShSo2satNyjxgPvZ4cO+QbrtN7Jd4XkCjto/vJbjBFQeHR4iSTpIkGHqNxTzg7OycFy+eEYQerusRJQta7SqioHF2foUsywioPH30G6jlU+YzhzAZEsY+Dx58wBfP/y3T+TmWVYNSxvcnPH30MfcPvo9lWezubRE6Er4/hDLBD1ZY9QwnOGMyP0YUE5bLNYZRYMjbJKlDEASsVuck+YI8NQhDn+9/8juocp+9oyP8ICdyfYJpSK9tY1QgjlPSNCcJZLb791iur/nq2eeEUcrl9Qhdq6BXJJ7e/z26rXsM+lX2DpvIskR/u8nu/SpONKPVadNuDeg0t7i9uCLzY4TARJUsltMUb53TbHSRJR3PS7Bq+5QyXF4/I4znCMqcIJhRMTTa9Q47/R4qNe7f7QAODVtnZ/AYQ60xnw2J4jl1q8p8Oudw9yEt20RXasQeaAaEvsJ0PMO2q2x1D7AtDTIJZxFT1SvcXK6wtC0atT0ULUFVZWRJR8RGVSx6AxvX9QnCGC/w+Ysf/3tms4A8ukOZDSjFkkZzizLX0Q2LpRNRqXRZzGakUczDp99k4rvsNxr0TBFV9pk6CfWOgTd3acoKg6oGjBnevGA1nZMGDqqUIWQFd+/s4oczbq/fMRmN2en3+fDpIWkAnYZNXp4TBgH37+5Rt3oMOgOWU5c01gm8HLveRFRKRDFA1UpUTaFetRERiKNokzBWJEyhTbt6h3vbP0AXa6jUeHzwLQ56OwxPX1KrVLi7/wBDlWiYJjV1h/cffI+O3aWi1BFQKHK4OY843N3DNuqkbkC2jqlLClmWYFmgG2Pmi3PioEIYJ0ymQ2LfoVLJiNI1hnGIKr2PUlEpRBk/WOO4twwO9/iv/6//L5rqax4+uIte7aOpNm+fD7GsCooqIysbleB/aFA2QTeJLMuwDYnwJkFYiaiCT5moZEmOnIaoosDV9Zg42tgcklSEUiF0HZzliMATyHKJLI8JQ580TcmyBVmc4Lo+CCl5liJIKkIpULVK5rM1aVZsmqyyRJE1ssRHVWUqlTZu5CIpEmVWEoYhW30DQ6myXKa/KskoybIMQVRQ1SpimWJXuyBUEQQBWYSiFMmSlOurZyynIaUgIxQKeRZSZDGmYRBHDtv9xyjK4GvdmUApCASuR69vEEYZaaYjSgWilCOKBb7rcXgwgMLEDzNEzSfLEihFwjDB0EPKPGM6STEqFoqgIgiQxgmaJlCtySSlTBAmlHmCJIgoqk6chDy42+Xdm0sct0SRFLJi4/SNkxS7mlG3RZK8ikQKZUQmKqSlgJiE6GZAFBcbTaQso2oSiOB4KaZu8O1vfUyns0+SJMShiKZsGjVFkUCcM59eIwgGkgxpFG+cxV7Idr9OdwAnl+eIokGaJciKQF5uAlEff/gRdr3NzWhNTkEUb7BMQVRStWx8L2R8C5puISBTqZh0ejXSWOToziN+//f/58S5QZxGG6C8WDCZTPjwg0/pDbZJshRZsbCqHUxLRBJztvoWP/vRv2M6vcJzIlSliqLqTGchiTsnW/hMLzxGFzdUFAmzKeJEMbKp8uj9h9y99wgRhe2ejTcfY5QWiiGhqTUQIhbrMa3WNrImo1cE2u0OHUtDF9YU/orp5QX3+x/QrmYsLzIahokiF8zGBZVOydvjU3qtp5iliVzsYDd2eXP+GsVIeXf6BbOhR5GYG3i8biJWFLJ4hStWefiot+HkigqdWpvHTx7Q392m3dlBkAXOhm+4Gi7Z230Pu7ZN6GbsD47oNbeQxZzYnzM6O2FQ28euNUg8eLhdZfg//oJBrBJnHrraYDw+xwlnuOsJiyhBTGzee3Kfva1dtra2KDKBxcgj8W4Zn/2Sd+9eIJYtDu6/R7XZp9rrotegKHXEssFwNOJ2NCcM+0S5RCmVjFcLLq5e02wcsbOzTb/TJfDHrL05g60OSTqn3Wwhlzq3lwEVU+b25pQoLDArDT7++GMWiwUnJye8u7nCsvfRmhmT5ZQgzhBUh
6IUcYIxY3dFmEjIZcxqMmK70eag1cLSE67P1himycyZsopD3GBNUSsZrU9w8xmGbTL3PIpcomqYdNsd8jQl9EJmk1tm02tCZ8LpizP+5f/3H/LTn39BrWVzMTxlHUQM7h6wEiJ69xVC4S1y28QetKjXLPK8z8ef3iG5fc7p5IQX4zOcRGcZaeRKEz+c4TkuuaIgWTH3Hn7Md37zUwxd4P27/b/+hjLyDZLU4d6DHRq1HtVKncdP7iHJAlXLpt+5z3Ti0O/ZHB7u8+GHH9PrNTAtjSiKELGQZR3LrDLo3GE1FTjcv0O9LqFrEs7SxzQlKoZBs9lkvpgRRiu2+odQmNw7fMzuYJ8yFbm5vmI5X3Bn/wBJ8lFlCUkuCbySWrWDJCssFgENe5fJxKHZGhCFGfPZmvt3P8F1ZIJkQX9HwXGXeP6Sf/aH/yWv3/2Qb3/nG+Rxk+HNOa+O/5KPvr2NrClIak6vv48oqQSxx2LpIog5pmmgmxXCyKXdaZIkCYvFAkGCooxYruc07C6jYch6FSKUInfvfMTe7j0CV+Tk7HP0SsnoZsz5ccrkZrm54Ugkqnaddv0xl+cJveYBYinT7x4hYrC3e4eDg33s7oqSFGdqML+Bv/zLf0mWXaKySxk3UMQuS2eMZMbMVymTlYNsyZxfLChQCLMZg609unYblYy9nQFN2+bHP/0FvZ0+1foOWtWk03nEdv8RpQCmZSMpCXG6YDo/w9LrJIG82bkbX3F1dYUidlAEizJe48xOUSWV26tLTk9/RqvWQhV6GEqDi+MZmpYiSAG+HzIZLcmTGCEvsSyRipnTtPeQJYE4nuOsAvzghs8/+3dMJufYtZLAWyKLKkk6J0mnvHj5I569+iGC6CMIArVaEy3ZJV6lVAwBpaKyWt/izmbISZOFs0aQfFbeGqveRzZFhvMElC5pWdIyB/RbW1QrEgoqctJiPZ6Rh1NOXp3Qbe1RliW6kbKcerjOnOX4itlwxqDbIw0CFrM1u/27dNt9WnaHdr1Bq6UhSgWqWMPzdEyzSh5HjG9mCKKMIMokSUxeTLgcf8nF1WvG8+cMh7f0+ipvX0wJ1gFPHzxhtbplcjMmdMF3QoK1i1w0GF4uMHWdipBz+uoZ3mqNIgsMBtts9R8hy1VGowlZ6NAyKxy07jCo7WAZBmlxS7PexraqtDoqUdSiUfsdrFYNQZFwoojFOqLfucv/7f/0f8aqv+XXf/M+gQeaVuK6b7i4PKbWqCFLCpqqo0gyIgKSKCCKmzFrmsaYVZvddoeL0yGybhJkK8Iywcsy2oMey1VK4HmURUFSRl9rN2PuHuyTxjKlAHkaI8syCDmVSsZqGZGX0tfBvoQozinFFNsuWK0zskJAkASCOCJNZCQ5ZDYfcjteUK3XyLJkc3NXCMTZBa+enaPJvV9VWQiIuYmEQRw4FNHF5v33SyRFJEkSZEFjvVzxve+9x8H2YxbejKIoNko8XSYMffa2D3j7/IwsFShLgVKUUFSBwI/Z27Op6J2vgzoCaVyiKAZhkDLoy1SrEquFjyio+OGMrPCJwoIiy9jt3SWMC5AjimLjkReBLPeo1mvMFjGlpJLGGYokkZYFkpijCD55piIKGlmeIkqg6zphGLLbbzO7KpguFWTdoMhMskhEKAyiLESuRGSljKht7EFJkpCnGWJeMuhVkAu4vZkhq8ZmhxGJNAO5yPnk6QMur8dkQP4rPSSkUcrdwx26dh9Z2SGJSwRRJ4oKCgpEOSTJzlkv1siiSRR4xFFBTokfRGiVlDhfU+R1cnKSNESWZfISZFll5Z7ylz/+Ea6nIpQFSZIhiiJ+4PD+++8zHo83t4+CQCqmyBULVA3VsPmv/5t/tNkTjyLWa58kC8l0EaNm8PTRAXpVY516TP0JZ/MbhsNjXGfJauUQRRGGqWNZTZqtAWGy5vJ2jmRkeEFBb6fHMppz7/7HLNwFXhTy6YO7mLmILdvUdJ3Td285OQ7p7e2AZjEar/n4g2/jL1OSXGSxWLGYT8kjh6Jw2Gk9YDaZkpXQv2cRywsk3aZeb5CFMY32Dg+e3CHKNe48qLJYXxFHBY12jZk/4tXxFwTpikbPwIlvOTkfoukdDo7eIylWiAqsohyzYrNVqyBEY6qmxn79Duc/fEU1veTVL/6QLApQMoFgesN6dMbk6gVJuGC702Ove484KFnM17jBFMMoaesdKnFBkqQMem1evfwZL784J1hZHOzewfEWRHHIePKWXtMijBcYSkZVM3lwdMR+o8mP/ujfs791iLuYcXM5YrVe4joRW4ND8nyFrOh0B01UVWVv/wGWWaXTs/j5Fz+lEHQktUKaR3zx8oeMRiuev/6cWsPGMLaJiluWa4/57ITlbUB3+5DLqcdw5uI4MqW7pm9LhCsfQc25GN4wdT0++/kr/EjCWRXMFjqKVWHv6C6ukxC7Q1Iv5uBwl06jz+OjJuv1G1J/yFa/pNWuYNU0hldjLNPGDXI++uTXODi6w4cffZ+tOx2G0zP8cIzcnHN19Zq0LtLsQJTICJZFKIucRNe8nDrIgoWirvG9FGfxlh/90R+DUODLf/WZ91+5odR1HddzSGKfZsuiKDPC0Ge5XKDpCrVqi2q1iufmnJ0s+eqrr8jznOU8IgjXuP4QVa5C1qViasTpijC5xTIb9DsP6ff79HoDauYWVaNHvWpx516bJHPxfR8SmXZ1H6koade22OodcXtzzsXpFbtb+7TqD5ElE1k0iQMDTWlydv6OdrtFHG04cYGfsnJvcL0ZkS8jljXiOMWsinz86X3arQHT6RzEgGpNYTg+5dXLCWGyYjQ7xwluCeOI1y8WzFZnPHvxS6aLIUgrprOA5y9/SZStuLi84eJ0yc3Q4927U8azK0ShIMsD9nbvkcUyWWwRJxHb/UdkUZ3L4S2tbky9pnCwu0MhKXjFinXg8ODhHpJYcLj/Pp99/mPm8zXj0Yzba5/FxGZr6z6htxnx3T96j3sHnyJJAVbVwAtXbG/dYT1X2T+4t0mmZQb9Oy2qvRq5atA61JCsmPZujdPLN7jhhMOjPhcXPyZNzwjWSzJhxGR5TKVSIY4yzi4/Iwgi6maf2eyck/OXvDk+5eG9b5ILM94e/wW7gy5bnX0Ot46oWQ1EyeXjDz7h7PQVcTQjzWL293ukYZ2twSGu69Lv96mZDQ52tqEMsat1Li5P0EQfJatCYNCyLGoGlOkKKZPZ7jZYTEY0zA4Pju5Ss3JaDZlmXaduKQhJxKAVs9MUMelyc3vB/XtH9Mwu05uf8Y0Pv0ccFewedpDKGuvJCaqQYzRUTKuDXgqkUc7J8zmGUGe7vc1Or0O7UaFRa7OauURpxHg85tmzn5NEAYPuHrqqopQmd/eeUlXbFFnI/p6FoZbMpzNqpk3giLTtfToHTax6nyxWvx63hiAUyIpESYasNkmA46uX+NkESemScMnCH1KpVtDZJwiXePFn3I5PiaIEJzin02lxfT1EMnPuPvqIsFyiVxo02lX8YMnocs5Ov01N20ZISgzNRUYiz33enn1FXFyCcs1sKmLyH6NV6oRpRBhL
eFFIu9vmn/7Tf4zG57z/ySFefI+H9+8xOjnn/aPfJ4o1ZFVDllU0RUeWN+YSQRBAKCnLnDSNeXCvj7f0cb0YSbJqHMSsAAEAAElEQVSQ0oI0TLGqCk3LZDQON7umFJSCR5JFaHpC7PukiUiaBV+rCjep3sdPHhG6OqGro8p1CiGlQCLNPBbrcwJPIM4gL0sQBdJYRFEzPvzgIau1j+tHJF9zELMkot+5j7uGUhR+VYgxilIiFB7ees6n731C3ewQBglZlgEFSZxjqBrdbo31MkFR6mRlQpmrGHodSZB5/PgOs8ULBHw0TUMUVOI4pMhEju7ukCcaSZYhiCWqXCUvIkph8/uuxgZZbJPmGzJDFkPgxzSaCtPJmvUyoSggSwWyLCFNY+rVCqPpjOlis7KgKdqGESgUBKFDkfsEbrLRFWoKWbYJhyV5Rrtps9XuIosmiRiSlSGGFJElcwrNwY8EvEAmF6Fgo4rc7FC6hMFrTr68pma0SNOYUo1AjUizmFrFZK99SB5vk5MiyaBqCoJYIIoJhipSuPs4CxlZ80hzB1GWEKhQ5BKDrQrOIsVbJ+iqQhoXxEmGH3kE6ZB7j/qEaYHvx6iqTBIXlIVKKaV85zcGfPXy3xHEJYJYkqYp88mMR48eIMkFi8UCSVKIMh+zNCn8jO2uzbNfPucv/vTf0G33EClJkhhJkKHImfoi58sFolqi6HX6nR7NOOPpwy002WVyM8RZx2iGTiZlXE+XSFaDne0260lGv9Un8ifkaYhcGpiKzeXFG858n7HgMs9KlNo+t6sTcmlFmq+Yjs84etzh5OwXGKnKt77zbSRR48GTj1mMLoiWMp0tm2rdptfep4yaOLOIOJ0jG7D7sIkw0bj6yRecvBkRhW1k0UWzNG6uc0y9wcE9jThNMPQ+ipWSiGOenfw5BQmdnRqmYPHpwx7J6paG3USslyhNm8gKCAyb8r3/DPvXPiGOAoL4horZo27c4+R2zZ//8R/z0y9/zvXljHpFxChkiAPi6AZ/LvDB02/y9P4d9DJir9Xg0/s9hq9+hjec0LNtWjUVo+xBkqFXchRF4uLqmvPrG9rtLX7wO48Z3RxjKDKHB23azdqGiBA5VBs6v3z1p6zXS/KyoCDH7lR48eZzTk5uSBIVzSzZ3RmwvzfArltU9Q6nb4dMb1coYhdFNsjl/oYRfXXF7sE+TpixiB1k3SIxdAb1HWqWAUWKoEDPbiEWGTUz5Wz4OYapsw6XVPQWihCwv9NBEit0e3XyYEGQWTQHTYIY1onH7fqY9QoyTnj+8qeQz/jl519wc3WColpUzR5GtYZQqdMd1Gj1+simDvKMVXhLaiSousLuvo7SU5jEAe8ujjk/89k+uE9eiFRl/a+/oex0m2QxiIVKsA5JA5F2U0UWmqyXCcFqBYWIhEBFv2Z7EKGaE3TFpm3uUamZ3Azf4cdToiJDkwyW84TZwkMqNjDgy/MxVqXzdeoP5qMZ6/WQAh8UnYlzTJCsSGKB0fCaJHEYbPW5uVkRJwF122S2PGftXVEwpdVqoGgWhtVga/cuklRnuVjQatdotg6Zrx3W4QVi3mB4EaEZBbKgEwYeqiLz4O4DonWBUKj4jsTbV+ecn17ghacosohQmoyuU4qyRJRUylynWmmwmnsslwtkScO2m4hFjdXKpWe/x9nJmiROefHlJVWjTRrqrFZLoGC+mtLo1EEQMLUtirCPqiuMJzBduYynHnX7LqW2IMFDUGWKXMP1FnRaFXS1xHV8XN9DMVJmswmSWDC+nSBIIlfD11RtA7tTAyHE9yLKVCGPfK7PrtCVgooucn35AjkvMKUKpZfRNRUSNyUvRIQwxz0/5k7tG2xX75CvXQovoKYafPeTH3B7PafMCra7O4Rezmzu8XZ4RpKM2ep8ROI30CSZ+/uHtLUei9GEra5CGC8pEYmI8cqEn7/8JasVjK9n7HYMVgufrHARJQfkgkxUcBYhs+GK0XjITruCVihogoksWazcFZqlMFvMuZx8hRMJXM2nePEcoRCYrkDptVmFOe7oHXZXZr3wuLh9Rau6jxedg58yWU84cU4QjByjWkepR7jCDUKtynip0a7fo6lr7LR3afS6fPf7v0Pf/oBu+ylO7DMaLVj7E/aPutzc3DC8XTNcLFErFRSxTrROCZ1TsljGHS9QKwHbWy0oNoBZSZAIwwp2z0YtDJIyxTQ1tEiiKFMUpUOyzFn5U1Qlxja3Odh+gijrvD79CsdzsCyBs5sz8sRna69D7CeIQhVRhoePtqjkDaSspKoZWMIholblePiMxzvf4F7nMeH6AeeT95nFAsPrCe46YrEaYmkW//qf/xE37/6Qv/0//RaXFyO27lSJEwelCuPYJ/QkdFlCEwQqVX0zchQEBAqyokRQoCxUBk0oxRzPV5CUkDQPyKKYhiUhlR5lYpFlGX7okacWcQS2beN5HvNJjG5IxBSUKEhlRlVaIcQFkpHi4VNIBXJcIrJmHResVgmCISFEGYqmECVLOlWVyE+JJA2ZEiEqCYSEPCvZ2TYZ7O/hR/mvqiwk4kxAlBXkUiVPA/R8B9VokysR5AU5KbkSESxvWPsxQhaR5hGiXhClCd1Wl/2eiZg2SDSZNMvQcpm0MGjWIE1O8ec6sqqT5wkIPrlURSwD3NUbYi0hFSV0SvK8pBAS8jRClSJuRxc4abEJQyUpgpayWnq0Ww0GtkqRGZswUZaRignBOqHXqqMVPp+9OqfUq6SBQy7JFLkCRUqtUhLJEOYFFBvTTiqIRIXGdkVCX4xIigoiOVkSI4sCeSaSJQGdrSqiJjGLM8pUJSVCjUQoJSRzgr+6ZO4tUbU6UgqSVlDmBaYicWfb5PX5TwkTD0lqIEUGqlDihxN22xX8mwuGpyNUpSSTNiN6MVfxgoCtTo/CjZkOYzI0siSlJCUtRHRJ46j7LQ4Hv4EXLilKCQEZz/MY9HZw1jGCIJEkAbJoUKgpkirQtrv8/f/n/4O8EIizklIQaDRbSIKEHyq8d3CfPHQhSkmSETfegqJqcnGzIhVl9JpMWo75/IsXjFcrjJpFXjp4QcjdB3tEmAyvFyxnDm+/+pJev0m7W+XPfvYMigaQYDc0tvr30ASTreYDKmaDMs1pDVq0D9qMJhP2D6qcXy2odFqcOyd88fIZkloniUtm0xH393dYJDd4i5ziRCMY+dSrT9nfr+NEN7S6LWIvIk2G+OuMjvEph3v3qNVNNDUjjmN0rYJoZFwPfWI5o9L+lLtPvoegK9Tr9yhCjU71Lu998k2auwZiKXN9u6K1/x6emFKKc466H/Gtb/wmdS1DyRMy3+dqtkZtP+W3f+fvUZolb5aXvDkeMQ9cpos1ZqfJ0QObk/GQakejKDyO7thEksZiFRCuXVbRhIopcXN1yrtxSY7EZB2yvdPHm09wl6c0WxaT6RK72qRi6ii6gO+tmV/fIvhVBvYRnXqDndYeQZLw+plLLsrEZY5o5RxPjnnx8gTFUHDWMYqioVPgulPW/i1nk1cM3QxLr3ObTBjdeLQ0BVOvU5YhWejQ7u2gZAXOLCKY3hK
j0KoOcG991EIgDAqQOjzceZ+ytLC3LVS5Tl3tI6s3vH2zQNVlzi6ecz455mS64rNnr1DrGWdXt8hpwXSsUVDFC6Bp76JafSYLB60s8ZycF8fP8KceB/v7KBUF0SwQNZv77z/8628oiyyFUuH87JZms8bBQZfZOKZaVQGRn33+jFanzvsfN9ArDlaljqU9JFciJu5kswgra4iCwssv36KoNiUaaZoyXg7JsoLdgwFJUTAcX6CaKggGINLv7TObT0jiDLthUODTbvdp2Q3yYkGnJ7NeLri4fI1t21hmgyyLGY+GOO4C1x/z8uVLnOiMMJBYLkWGN2PMSpudwUPW/g3tTp1Ou0eSZAhFnTLe5dmXQ1brBcPhkNkoYL0QmNyUdLttXj9fYlbaDLbaxEEVbykyX55wdT6la3+IKvQ53P4ON2cFqV+n23zIcrkJkKxXcwxDYjKZsLt9H1PrUjdEnuz9ACPvcnH2DF0WsHSF1WxORTcpIpkijbDNOnKh07EtJpdL0tCljENmiyGT6RWK4eL4l5yeviNML5nNxlSqCZ6/RNQKSnGNXe2Txi1CX6ZjH7G99ZBe+x7eQqfMZA62PiTw1yhiYwN9lbfYNvvYhYicuHzy3kccbO3x/Nkzmo19Dg4O0DSbL7/8jMvhCVm2MaEMb06xGjmlOMfxRJzQp9BmaJbC0h8TFyG1powTREyvFhwNtpC9kHIx4UF/l/XsnGorZrwMMc0GAhK6qVAUKV40odk8BDEnSgMiscHUD4hFF9lIuLN3H7mwUAUVQ2jQqZnUjEPywmB7q0sZZjQrJrbZ5GY0QrMazKcRaSxS77exmk+ZOxGNZptG6w6JB51+k4q9x3hxTZFA4N9Qihm9/SPipMGd3UMcxyEo1rw9fk5FrfP+Rx8yvJ4R3EbsbPUpI4E9c4dvf/gNBDWiPlB59vaSuqqS+gKKJ7FarjEqJrpmIkgKiD6CLyArK947+jX29h6z9CdUzS53Hu2jV1M6+zVSOcJxNVb+GqkS0dxSiLICWdmit3OHyfwaZzmmJRt4ZwFtraRI4PXtJYmcs5gueXf7Etc759t3P2Wrs8O7UYOfntQQRZVlvGQ+n/Pu5pi8tPjnf/8fMx7/c/7Wf7zDP/tv/nu2d6ooUcrqcsX9Vocv/91nhCIklRxVVZEkCVVVEcWNqlCTFSQENFWh2kz58vkLgiT/+uZJIosEFCmj08lZLMckqUKa5oTJksjPqNUs9EoDXauSJhtuY55v4Orf/8FvkWcWeQZiKSNTZ+2u6HQNDnpbuOsNdFxWBIosJw5Sut0mUZQQhykiGx6jWAoIZFhVCd8LyfP8VyWIEogCfuTT6hg8em+Xi+GINAqRBBlZlQmjBIkSVZEoS40o0TB0nTjJQSxZzC949eYzgsCGUiMvQkopY7lYY1kWe91DBNkkzRNECopcIEk8ZKHggycfQ14nTdOvE9opZSHjeQ5H93uoWo3AzzdjfzEniUviIKe/E6NrVfKkiq5rwGY8XRQFobuiVquh6A3KPAVZoiw3TMg8CXGdd9yOHCTR2KwskIOUEQQu/tqFVEUsLLJCAFmkVCSSLKZRMzja7bG/f4jnrsiLiChKKARIk5J2u4UgGyRhgSpDWSRQSmRZTqPVZLDX5uJmQl5WEaVkc0NcaAgllFnJxx9+D1kxCDLIsxJRUEhLD7FUsK0qVaNPKebkeUCWCghSTpZEWGbI1fBHTCYOBQJpGjOZjmi0W9Rtm5ubG9I8QVYV4jRBKKHTavPF55/zJ3/yb+h0OiRJhCSJiEJBWpRk8YRfnr5GlQ2a2zbL1Zy0DIlqJYvFBEMtmC9uEBWLarvGbOJTFiKy2kQIfZ4/O2Yxv8GuGNj1DocPj3j+5oRXby/Y6TfY3u9Tb+2SZAKPnrzH/tERzW6FfrPBch5hVRokfo6uFbx+c8Gz58/xQ4Gt7QMaYpvcy9E1i53DLSaLJe8Pvksl9zg5PWbwawOUAZhGj8KRyTwDXWkgpBVsM8cP5+RpyO1lQBoaPH54n8cPv43nuDRbNVq9Kq/Of8Qf/tH/h9PzGau1j6x5nF4cIxkSs8UlUbLkG5+8j6IlOMEtoqbx+JMaW/c6HNy9R+3RDk5txWj8gjevn/Hy5pTunXssrx1SMyBRPGbRikDKiOhQ69WY+w7zUOJ6OSZZS9w/6lPkLvJCIl4UtHs7LKfXCIWGJCmcXt0ycZbUuzVevbxidFmgCy3OLk6ZTiOQLTJFJJYdckWg1CVSEeQyp24VrOY31GsWTbuKrOQ8eLLNfD2hoamkZYyf+Jy8OUdTbLbbh4i5wCoNCYnZ7m5TiBHhzMHWderyXdbLFEWu0DZVgoVHxUwRdB2houDnAZlkoll1zi9fUSYS4TIkXOZE6YjLyxG9LYVmtUu31eX64hqJmLZt4swiLEOELGUyfkkmRhx0arT6TcaTN+w2GuT5isRZ0awozGKXZRygtRJ+9IufUe3l/PLdv/rrbygvzxdUq+XXQHIDL5hQr9vY1SpR4HP33jbD4Rnzecj4usvwSuPLz6/IcpdqXUSRNPSKwenpS3YGTTr9Jv1+n8P9Q25mM96dnTKf++RCwN7BHZr2FogCrhMxmb/GrleIU4c0lhCpgpAiSiUiBqPRDfWaiUDObDJmtVhTq1Zx/RHPn/2INHW5mZygVURyIWK6PEdSYoJwCUWNwKkS+iLrRYoidpDFKvPlgr3dI8ht7h18wNP3HmJZOr/9W7+LbR7SblkE/ookSllNU1aLjMQ5ZDXPWC9HfOPDDzg/uUIua1A6QIEgRpimiV3v4To+dw73ePfuGUXpIFdaHF8fU2g5vd0Ww+kFspGj6ha34xNkNUWQ1xTSLZrcY7WMefx4G8tSaTVttgY7GIaBiEkSgySadFv3+eC9X8NU7+KtcpAKyEwujy8QZf9r7MSML1/8jLycYdcMdnpb2FaLx4/v0O1YuMENdttElKHIFjSaVbKiznKRsL07wEkD/DhFUmVuxlc0WiqL1QVR5iErFeaLgGrNBnkzIn118pqFs+B6NMSLAtwoJUhSMr0kknwMq7mBss/GWGaDs/NbkjJBkqEQCvJyiednJIlKhkO11mK1ihi57yj0BYZVJwplzi7PuB2N0Cs1jKrFu/VX/MN/8S/4b//JH3N8fUZYrvjpL/6crMixajKnN2/Q9ZjQu2bpHiNIG7Vd4meQGoSJibMOGZ6fIIsaxxdfYtXr3MzOmYw8htMTzt5dMrldgJhh1U2EwuDqbMLam5OWIr4XQ1FjGWh89XaKqdpoqcvdowHrWEVUahzt3SPxYuIkJityZEliq9qhsyfQsvvEUYo/G6EKBXW7ymQ4wUtDanqfyXWEXRNp16rIqYkYV/iPfvB3uDw9o1i6tKsW9brCZH1Gpq9ICwVdN9jf20KTRDIppWf30cQc13X5oz+65cXQoDRMREFB9HyiwqNlD/gX//Qf07BP+F/9wTcQ3CZ/92//HrooM1+esffoHnb3CFGsIskV1FhCkDeBHFmWoSgpioKyFEjjHE2CVreDILbI8k1TiCgShxG72zVq1ZiLy1tKoaDIRc
pSIE4CdrYbJKFNEIVU9AaSnFGIIVm24qsXf7ZJMAsGWRYhklLkIp1mn269imJUKPKUMNjsxoV+hKYWxElJmYmblL2ukEQp1YqGLBlkmU2clL8qTdMQJHCdCKMi0Wx1WPkxmqIiZJvASJZlqOUGMXY7cdGsAn+doigaaWTRbxkcHOwwnkeU2QaplBcxiiChGRAvI8azzTQiT0s01UQsUvodmyhIuT7z0FRzM2KXIE1D6qbNnf09pjMXz3PIsowwDBHQKLMISRqzWEQEvkAUBQiiglWvkaUF3WaFg4M7IDTJsgQ/3QDJPc+jpqk8eWRTlhpZkVIWCWVZEiQZznrNb/z6d9ja3SUvBESxJKekLEV8d8MN9dcBX3z+FsMwkaUSQ69SUBImKd2OxdnwlizRqFoasiKSpjmiIFOUGZe3b/BjkThPiPOAogwQ5ZTIDfn4w8doFYFXb89QTZO8SMlygTjVyIuUX/u1R1yeD5nPCgR8ojgginTSNKZbu8fv/fb/gdncJyMnyxKSJKHX6+K4Gyyb7/us12tkWSaNYmq1Kv/wH/5DHGeFovwHNaVInicIZUnP1mlYArWWyb//0Y9oVHoshkuEIoRcZHFbcO9gh7PTKwy1galF1A2dmqkiCiVmu0F/u40q5eRJyfPnz5Hykr69jSL4DK/miLINQspXXz0niAtGsyuyOOS9x/e4vjxm4c6ZhWOcSOC7v/49/PmKrmiCHCOLLu58Rb+6hyJXaEg6kV+g9FSm0zlfPn/G6fASseITBCtkKWLvcJdUFHHcGZ4/x3On1Cp1zo7fcHN+Q7VapdkycWcZpeDy4acf0eyauAsXb5GRRBGrdUSzMcBQB9SsLlEU8ckn/xHVRpOoKAizkrVU4q090kBEsE3MXZ/Tkz/m4tVrqv0KoqQQeQUVXeD6do4Xj4GIxSQmjCLmN0sqskySyMQ5uK5L4AUUaYBVSJzfnhEVCaKh0GzvE/k2YZQiSB7D6ym21WR0c8ndo/cwTJ1m16S3rzOazSnFknprl8OHe6hWDc3okOYVHjx8jFVVePTgI7Z3W4wuTpAEjYeP3qNmSNh2hcvxGy7frdkdtJmOrpm7Bf1Bk1JQqJgSRmnyYO8BR9t7CJTE/prVasHMGZHnOe56ysXZFTVzl43uSqaq2fR729x7+A22ex9jKiaSIPLwwRFZpKMroAiwt9Xl5uYF1YqMN3dYeiG3F+9QopJG3UJDY9dqsNPdJs+qLOYe89kS0zSZr0IWTvFXbij/yi7v/qDB0VGHd2/H3E6u2N7qk+cil5dXVPU+3//WI16+viSMZsRCQLMlcFivcXE2odPbRkwWBElMp1FHIkFWclarBd12h253D3cdcDV+hbOasbd9xF73KWkYsTXosVxulp3jKMVTlpSlgCyZLFZzOt0a529vMI2M7a2PWcxdkthjMo5pVHchX/PsixGqAceLEYMdlYH9Ho475vRmwf7OPo4z4voqwa53Ob0eEecrLEtHUxr85g++yen5V5wcv4JSo2IoSFIbUZgyuo4xZJNuT2Qxv+Tw4B5x2qJaV5nNUw4P7lFKE3rdHbISBDHh+PiYv/W7fxsv8ElLl0pNwvNDtgcDlkuJOF6zdkYooolmgK6btBsWuh0QRSlWrUEULmg0c7xkTpKmaMoOmAHN9sd4TkGjnhNnU2TJxPfmHL+9Zu+ww+3IoXe/xU4LFr5Ps6FBVqJXO8yGHvP5BZYlYugKi5nCcumyf/gek9mYXA7J44JCqhImK3a6TXSxymQ5wVIVbpdDSimhVquxWKww1C4yUIYLnFVGXviEgYcuNLBbbZJoQpRGTMY+9+5tsRz7VHY6HF+9omDO0cGnjMbXbOkD+o2nLMPXKKhkcZs0D/CnAVq/jyJZVK0u7uoGTTbxipI0W2JaKobR4HpxiqykiH6fj9+PaNX7iGqJ3d7nyTd+D7t+F92o8ur1jzg7/jc8PnpKHAusV0sqlQhDUahIsJZC0jAnCi/Q1B6SlFAgI0seFxdfYDf7TBdDLMVgPi8RZQ2yCGfWZGvwCGc5JLOgUnUwhAUvLq+5s/M+B41DYneOjsd3vvENLq+O2RnsIhTXqLpKEqfkQoPFeE6R9jFaOmJqkic5k8k57jhhu3WXQlXZ2/6AIH1FGFVQ5M3O2NXkFEGTCHUZS7GRsoA8jujesTh+9QaZGs1Bg8hXUEORZmdNmu7wF+8y1vkAEw3D8ElcMKoVfEPn3/yTf8Cn2yH/xd/733B+ek1x+4ZAMtBkiTKqoBQKs8WQ4aQkKRNqooqXBvz/WPuTIN3y8z4Te858zjfPQ87jnW/NVQAKAAmApESqZYnqsCmGW2p74fbG4Yh29MqOcHhjhxXhdocjZMlyt9WSWmqFZEkURavZoEiQIIAqVKGme2/VnfLmnPnN85nn40UioC0WyoizyE3mIjP+5/3e/+/3PKIgATdeakEQkAUJkClXVAxVZ7WIQIhuLDERhEFKXtOZj8H3ErJMuXHO4iMKOusbRa76c8JUQo7npOnNFmmt3kLX2syW45ufpyiQGvjxgHKlhDlbMJpG5Cp5AtdGyFJUWeHwTpvzM/NG3yOkJIJMkgQE1oLNjQ0+e2KiyPEvzsQwCBBEGVXUMTQR07RZufFNgcwNkHRIU5GDrQ5+sGRlZWSlCI08YbzENGUePiySZCleKqLIMjehoYw0cBGEFdViGzuwyLQMVTAI4wTXsnmwUWU5H5HE1ZvtmAiClOI5ApWCj2KYjCY+4s95jZFvk1d14simkq9jzgRiIhRRJcnAdV3CMGF3q8Tp+SVXgwDDyCOqGpnvE2QxRQUiT2Y4CtAMjTD0yBIFSc8hSWNG449QtTJumFDQBWI/JgwzpFQgl1cwg4AwqWBoJcJ0SRDeAKjjcE67XsAw8kiqhJ/EqKqKIcPSXtKqVeh063z+eR8/NOk2iqThjaAhjiKKZYVYyBjOQ1SjhZAFSKpHnOZR5YzYdxmOpjhhGSMTiSMRRU1vaCWjIz788L/HdG0EWcG2LCRJpFQqsZivqJSLeN7NNabv+7QqFa6uLvmn//Sf0mo2SSOBMIxRZQVVlXBXNn6QUJNSbGeJFIQU6wbF6m0MP6Vx2MByxmSiTM5QyBsaVqwhUqBeKjLqjYmyOZO+RDGXp9wtIgUNiqLC4a3bCOLrzOxLzs8f0222qTVCXr36jO2NQ4bTJRPfY/Owy9VZj9BKKet5Mm9OTpTBV3nwzh1EL8Jdhrx88gWWkKNRCghlkUCKWSt32LvtkoYZRVGnbhgslwuWfkJ9q8N8McbQRGo1lWJeRxJrZEi8enXK4Z3b6PI1CHUkQcfQFLY3mkTJAjXTifwVqAbmak4UvKJcgzB0kNWEJDGI4hg9cFl5IfVWFTExUA2dTNLQ8wn9oxeI0iZiYjKbKOSaCaVCjeXEJHLg4M49BGEDczlHEUu0CnvsvCGh5WROe6cIukyz1iFLREq5AqbpkKY29VoZQQJJ86m0atR2ytjJmOFqys4tODn9CkPfxgny3Nnd5uTl5xT1Is7cJAEebO+T2At+/P3v0719h1anSCmvU6xr/OkP/
px8uUa3cZuLkyccH7XIlzTsRMH0l5iuQLWUEVoJjqXgmgK5fAk1Z7AazjBnA6aTJaV8kZxRZe9ejhdPr5gvXHL5FWfXNloxw3M3KRQMVkuLNFDY3V3DD0YUDI3VfEjg+qBnnF+c8uC1u4yvnyFIKuZ8QmDauJLP6tRB1GVKqoqeacSKwHK2xHN+6b3jLz9Q2o5JnGwgSAbNloKqa5yfn7NaxNTqLi9e9BkN5zx8bx1N0xj3pgTugPVmlcXIpbVeoTeaUK90kDOFYX+ARMR0coVkGESpxcb6LkGpBVHCajKhWa3x+Pln5CsakqiTChFhcoNRsVyXxcJC1UVyRon97TtYy4wsCbl9eIs/+v6/olQrsL6+yxf/9hFbhyn2Sib2JNa+LvNP/z9/RrOTUTO2ePqoz9vvGah1nWZbZmdvC0mscfTijMloyOHefXw3pFxRqVcaPPnyC9bbD2k2pjTqdeI45Fe+u0bCjN3db3J6PMC2ZpRqHn4YcHr1kq2tO4zGM9a6t1kuQxRZo92uMhmvECSNJMwzXz1HVhTiNKFcyRN4AlFUxCjKeO45oqQwHJ8gkFAs3GPcH1MuF/HDKaZpo3p5umsbTGaXFIoSQhrQXWtQyKmAz717d0iDJb3LIbFg4EoqQpIiKQGZMiFXSelfjzg4kBiOFwTJAvNiQT5XIk0TWq06vueh5ELmywFhJJAvV7AmCY3GJvVmk5U9ptPaxjAMkmxEQVawljIiCdWCRmCDLrTY36kxWh2jaQZ+4FIsqCSxjqCWqDbLrMQUsaawcBZ4q5doEcgqLFcz8uUqnW4bIwez2YpWc5vgyqXTrhLYCuvN+ySiiVGE4dJDSrvoLYGatkGttM9i7hCkNe4//B625yDKAd/69t+gUqnw43/3zyjoPs26QECXRqXLbDgjsEw2t+4Tp1WWU5OCUEYUNKJEIl8VSMnIySJ6TkdUcxCAIotMzBlFqUhtq8rSz1jMTdrlCu/d6zCfuLSqdfRiQBbm+PzLz0l0mXYQIYkScRBCmpEIPmkmk1RiOkaFWTTGDeesV+qYdQF/NECng6qAJHZZOA7drU1QY8azY5qdMu4qImFOMV+g0NxlZRrE8SuE2MS2UnJpjC1qhOzxo5cWK6FGEYEwCwh9lXZtjfP5Y1783o/53/y1Bg+/8R7T6zVqtQrLbZmSLoLTJJN7mIMJ5spnsloiyzmsxIFUR9QUEu9G+ybIAggqiR9TKWnc2XvAn/xgctMMjjxUJU+a+VSbCkg5LAuknIDnO5T1Groq4/tDHFcgCCPyBZlCUWF8bvPON9+h0rDpT2ZUarsk8YpI1MjwiYKQ6aKBG4EY+QiiiCDJRLGD608YzW6a5wgCmaDjejO+9d7bmMsrer0Bql75xZmYxSCpAlEQ0e3mGI0viTOVOAmQZRBFEdu2KOdzVBptZA1SSSLNPDTVQBBduq1NXhwd4wQSutpHFdooeQURAVUN+eSz5yDUCaMbyLokq0RBytZGnU5H5Ic/W6LoLcLQQZRFksyCNCUK1hmOXbzIRnJr5JQClmNTr5bYbG3xZLTAjyMK+QKIPmmc3XAzBYvLoY0sb6IoIo7vYyARBgnd9TIvX3xFyjayLBKGMYooE6cCOVWiXEk5uYyJIgXfDW4QSJJIGHpIqkA+b2CHCW7mIQgxgiASRgk5TcKQEoYjhyBT0XIavu0AHkIWU1AFHt65w+/9y+eImX7jB49VVK2EKjl02kXOr5Z4aYlKKUcWpQgkxImHpgTYzgxRaeLi4PsBoiBjOhaBk/LOm/D89PeZzRtIosKof8l3v/tdirk8URQBIvl8Hk25wVjVqxX+1v/t77KylrRqDZAUwjCjUMshiwKZBHlJwrv0ucSnubFF5rtYMw9lfYeL8w+Yeybt5g6LeR93KbJ20GTpThl8cEEsLegat0iZsLCGiKsmWxvrXJ4POb68JpdX2FpvUNUqjIcuhVKDWgEW0wVuNmO/cwdd3EDJQrpdmeNnS2I7x8H+Dr6b4U1NZtMhlrugUQtYmDZWuIXtpTTbBWbnn1AtVHESl8lihbSxRXlrnWw+5Pmnj3jj7fdQlYw08Ylii1plg8FoSKlYv1F7xjGyYTAcrBASl3arhjlzqFbLxIKKntd58WxIq95mPoGUFYaeRxAjkjSmtN4mNiPSKEd/OufNN7ssBqeISYHt+jeI8iaLkcObX/8aL6+esPI0YkWktVsBQ0ZOFZAyBrMLFOp01puMFpdEQp5aSybOZJq1JicnJxhykdfeeB1VyTMc9gjTkCgY8+rFJYrU5db+LmZvihyZRO4Sy3G51FOSTCKJJda3a4S+z9XlFHNwRrtdICfpFDbuUsDkkx895c7r38BQAjQvIVnfRdQyfElFFXxq1TaqtEQttBCLI7KiSM0oYUYeQmYixjJbW1soao7RyEbPacyGDnsHt5gvr7BWKWvVEsVCg4veitUqYGdnhzjwaTRyDEcygiYwHaXs79zBNB0e3nmHq8tjslhlb2+T2bVLljlcXI55+7W3GM8/ZzLXUAWZONMx6lAoCv/hB8rReIxRkBAoUK13Wa0siiWVUn6DclXCC0VqDYPx9ZKVdU7ipxTkOppUYBKOmL26ZGNrn/7omkLOoDe8RBQFhEzAHSXYnk1kH6BKIVkU0qoWGY/PmQ3jnzPfesRRBmmTcqdJPhfSrG1yfrwkisr82R8/YX19jY3tDX74g8+olNbwLJfQMbCXAosBPLz/NW7db/DRR3/K9laJNx6+w+5OjsnwkNnQQpVHNJsNwjC9IfbLMwajKY3mW6hCjVIRzi9e4jgr3ntrj8df9UnEPp2NvZuC0SikVJwzm4/Y2qlQKGgMBwor+4qVG+OGDpoBS+vnHL4sR6lgIIslRpMTskijoLcwckUyM+Hrb3bQJIfHny7IrR1w/KpHpdAmXxCZ9vvUyil7O7e4OJuwc7hHho8ghoiKSZYUCMOY5XyKZc6olHWOnj2lXi0hqbCzfo8otBhPn+GPI8JYppBrc3CwzaB3yXxpsr5Tx3UtIkIMrcxiHoHo0eyWCPyE0Ew4Oh2yvrFGs1ViOl/hWikHOx1Goz5uMOf27TsMUpOC3kIWXYy2TpokFMsZV8OQ7loTSfHJiTmSyKdbb5IyQdcs7BBsW6bcMcAI8fyIt9/8i7y8+BTFyOhf6VRLFr3xp9RbG8SZR298RqlYQVEhp+9SlHdJsgX1SKVcWOerx59w++7XQbD4W3/rb1KsV7n3+rtocpEkddk82GI5ucb3UpwMJNkkEWF/f5/zkwG7exvU9yqMR0sKRZ0wKaN5Buvbd4hml5gu2H5E06hwfP6S0lqJtapMsHQ5rB+ysHsIUY4oStnsdpitLNJCAd/30apVpr1zNK2Aqt6wIQUxw4g9ZKfA3D1iHIATimRymbkHhpEhF/PISZm8PkNRbnA3L189od3poOtrpFmAqDvIaLiWhOl8ilbaYrNwgO9PaGzvMDrvEes7/KMPJFKhQlmKiLIUWS6TKxf4+Ge/R31xwT/4P/82xbUcT5/1mX/5j6l3NSb2gKXp4iGjlTfYrOeo6gr9vsXG
ZpNEyEgFEQHppjktiTeZvBR8P6RcKtEfveD07ApZA02VCcObHO6bb75BfzgljXMIsoecKHgWEE/J6V0GfRtRkW+MNI5ELqcRxC+ZziQkXSZKbMg0rGxBMWdQy6tcjZwbjmaaIKMSJxmymCDJ0BuuEGWFNE1v8oOpgCh4NBubON4JgvrvxbZpetNijoIQRYwoFdvEcYCkiiRZiJLqSCKsdXKcX4/w/AhFySGJEXGckiYhI/MTipUyqlZFFjTizCdJRBazKdub91hNhzhuRKFawPcWyEKRJEpRtBhBlgiSG61iJqZkmUgaa9TrLqom4AUisprHtl0yI8ENI2r1PEKWMF+FBKGAGviouRBFLBKFY7obBU7PPeL4JpIgpBmyruHN5uzurTO6TokRSKMARZJIs4zQjZGCkLzRxHUTRBGCyCcVwFANlpZNPl+AWMGLUwQlJkszICXLJBQpo1kqMejFBHGIJsmkiYCqS6iizNZmhYvzcyYTj2qtgCEr+GlIkt1sg9PUZjha4QYC5WJIlkWIQo40drhzexct7/CTj54givcJowhJDDEKNUy/R63SZH19Dcu+IJFcKuUyb77xBkEQUMgbVEplwjDE9D12d3eZzWb8i3/1e1RqtV/8DWVZRRTBc11UWUVMZfqxQKeUZxi6ZFqbUkVEdIdsVG8TJdfETkpD6VIqK8TOkiySuXNQI8lvEIchkppDjHSy2EESRN597Vf40dMvEIwq9kSlVk1YmJ8g0CUvFens7vDFsUU5KXJ8+lM8P2A0rNFaVynkNFqtbSwnQEo09t69zdn5MU9/esY733iDs/4lmqbx2Sef8+1vfB0/EhAkiTu32gz654hBjEKeRqVMmnmcXw7Z3riF6w358vEJX//ar1CpFhFEkcG0iWhEqPk89eImYepRrK+TJA6mGROm7s31eFPDXSWU6i2WyyWG2KXTLSBnApawQhSgUki5uDpGNUpYfsTF8YcUGmXef+u3CBhhWwmysaRYzpEvtJnMzihpGfPJlPWDfVJJYxHaFJpN+tYAVa5S1W9sO821Jq4NR2dDZCEmTRzMWcYq7HH//m8ShCZGSaM/dnA8g2rZQKHC+XWPTqNOKqXMAwtJVlglNtv3DhEEgdnVFCHbota5TXffR9M0kmWZciNi5i9RxSrN2036x8cIcos0cZAVhWK7QiJtcH7xGXN3gRxk5NUKgiay8CPEch693iTxRXRDx+sV+dZ7f5X57IyffPCYtd0qR2cnXF7AWrfIoy+esre3RpDZNKpr1OtFssSh3z9G00Bt5AitOV4YoOdEdrplzHCBKOcpNbtMxj2m02taSucmKvdLfv3y2KCCw8qaMZ6f8OTp5/i+RKXaJQht7KVGdy2HkXfoX/cY9ExyeYVSSeby+gpBSwlTleFowsnFKWf9s5sgOSr5fJOHt94mc/NoqoDjprw6nvPlsx6j8ZTtzSKz3hxzHiHGbXxb4atHp0x6MOsnnLy85I/+4Ic8fO0QUYHx9IxGN8Io3DDpzs+/4sGbee7f2ef+6y08N6bb2eJ3/uff5fbdTeJI5OFrd3j44DarxZBiocb15RzXNVmZlxgFj17/EZl6Qq6Q0Wo3WNuosvR/CuqY8+uvGE2PsIMTwrTHZHbF7bvrzGYzpmOLSl3kwcPbkBap11oIgsPCfI6iusymI+LYplwyWHg22/caxGqMXqyjFhNGy2dUN8/5j//TBvYywNBKTPqwmiVsbGxQym0TRw6tTgGFIo7jEQY2SQSX5yMkMcI0h7SaBUrFBmW9TaWygax0ubo+IwU2t95HBDQtQVYkwtCnXG1w98FdSDXKhW3K2n2ioIRRrCJkeVy7SCIWWDgjdvYbtLaqXFyeosh5WrUOcWyxu9umWupizlNIZCx3znwZs7JMJNVhMLxgZ3OPg51b5LQckR5TXJN/jh7J6F/6TMc+SD65fI35MsCLUl5efcLcWhARgbJAMfLEUQ5zZWMtBdY3uwiKx2Te5+nLj4hCi3ff+jaBrPLiukdxzWBsXnA5esrB7i1Cy+flZ18QWn2uz66Yz2NSXWO2ihFQ8J2QYqWAqDRpdzdpNzu4Tkx7ff3mZZ5BSWwxPl8xWS2ZTvvUCyp6LqPRMijpIpPBmPX2LRYrE7VcII4sCFacXz6lUAkYvPiSyHNRs5R23kBJIEpitFwOUVYoFtr4soszTVgGLt6iz3Z3jUBSWPZd9FqNgXOKXNY46vV48N43EPNwNT3B9Cakgs/L46fcffgmb733G+ze+RaNrW1KVYOHd7bwJkV+9LTMnz9JUYWUouYRJaCXitjumI/++d/jV9YV/vf/xa8jlSVOr0YYWsDtnRrrhsaDtQc0imtsHbyDoci062VOTh0CHyzHxgtuNIRxnN6wGWUVUZJQFIUsg/WNJm+/96t4gYooSESeQJqIqGpCu2MQBApRFCIKClmU4boum+tdDnZvMV9ZRCGoikEmgOtCGsNskhJEBdJQRhBjFLlGbN+w4JaeRhwpSIhEgU8UhhiGgEwOxw4p5gtkiUCcBIhphpBZPH58RJToZHL2iwdRICImThM836TXX+C56c31mSIRBxlJ7LOyevTGS6IkI00sEhJ8L0HKdEq1PCenS7IsIk6lG9tPJoAQsrHRYBnfHNORHxOTIWsCnufR7lQo5Ds4fvZzH3mIKqmE2YrbB4dkYZXxdEEU+TcqN0Q8N2S2uMBcLrm8uMGjCEJyYwLyQhQxYDYfcjqY/6J4pPy8sBNFAfPlJblSB8+VyRIIs/hmkLLnbHQ0JFlnZvrE4k3mmTQiCAI0WaW7WWVhJiSZjCqLKJKCLApYjoeRh+VsxuX1CkUxiMMAQ9PRZOVmsFRdXNfGdW885mkCqegSJibFfIF7t3cQs5+/yhIZ5AxRT8nihNgO2d3dpVrbIEkjokQmiyEIAsLIpVpvsrBNEsHBni/59je/Tr1aQpUFfNdhNBogZBHdbov1tTbf//73eXV0gp4zQLhhg+q6jqIKNxKoVCJIJIrrReZRjyj1mE6WSIjoSp4kCFFihaJUYqNdp16q0K51eXDvENvxuRgMmJhTlGIOo1imXa0jqxofffQzdjprlLUik+kxXz59yUb7Lq8/fI1Gu0ZOV2nm64TxFZ1uk3KpSrNeplVvMBz0Obp8wcXikk9ffMWP/6zH8y+OsZ0rTl78KZqWsr5W5fV7B/jzOebMJhYFnl18gRdnBJlFolqUO0Xm1hVJkuD7IpXyFvV2joU14PT0iIuzK2qtIrlcDj+cgpBHMXSG80sENSOMLbIo5PWH91BEAaOoIks6SCGlao1Wp8bZ6TGeG9IbnRElfZ4+OsI2BSrVPPdu3+Ph7X1Go5f0L0eQLqgWVaylQOLlqRbKpLKMUioyGk4ZXV2wmK+wTI9cUSRERUyLCKgMBzOiNMD2rnn+8gVZVkRQRfZ2twmCBQVDxvcGtBtt3nn7PuPpFbY3JLB9ltMxUWSiSDAe9ikWFRxrxeB6QK3TIS8vkJR9fvtv/q+5vX+LakskkFVu7e2iYyNYKYlXxs8mlIoqtUKFmSUQLGfUSxUQPRSxSLVZpze
bsrIDgszhuj+kUJM5P+tR0loc7u3z6PMTYpbMFidst2+xtdHC0HQs02Q2WuK7PpYzYNAzqTfKhHFCOA8RhYzz8yF6KqAK+ZuMe5Sxv32XVIjJl/O0N4sookG9Uv2lB8pfekN5995rnJ2MWMyXaJ0Ci9UVgtDAj21EEa5PPMZDH9uKCFFAMQjijEK+zswZoebKTMdjdg/uk6FgkKfb3ORnX3zGatSjUWyQeAaT2QVqScJLEspig8GFTRrYvHb71/mTf/cZ07HL2kaFyD+j37vgzYfv8f7X3mI4ntFaL+KFPfJSiWbpbZJuDLLFRvcetj3l/OqYbneXb//KN1nOenhOSuDLbG7XOT+fsr29jay5yOoSz1fJUhnXtrGCgER2+OJxj1J+G1nSmU18bP+Etc4d+j0HSZkTJwlBPGYwcvFcn2Zti/PjazrdJutrdQRBYL6YYugySVBhf+8urmtyev6Sw+4tgsUl1XIJRTMpVnRW84zhSubpS4flKqW70aWzBY6V0u7mCfyI5dKiXK0ThiEFtYOTnKJL23TbMcW8iLdMWGve56c//Yy333ibhWvSn55Q0nNYVhU5lyIpHppawvFXlEt1ppMVUZSRLzSYTEaU8hA4Nq1SjmU/5vrsiJ0HLdrFFg+3X+N86KCpLq1am0+Oz2i36mSpjyLn2Nne59Hnj9l5uM7k2qZWq6Bp4Pg1otild+2zvf2QlW1xcfqcTqdDHImY5pzX3/oNEqHPy1fPCBhCajAfODiOyLd+9dfwrUvkpEJDzpGFLpqmEcYJ6903EMVTavUiSWhwcn6G5ckEyRzVyyPrJbSCQK5SYVtKeLj3LXq9U+LVHGshsvv2Fs2ywNI6I1x5VHd3OT/rI2kRY/MGFzE9GdJs5giiJbX2GgXb48wU2FpfQ48ibHwqTQN/HFDt5Pno+Qvu7e2ymPVpr+VwrBhV6nJ5fYWbpsSrCzY3XuN66VMIV6iyTBwnxEGElBNotu9QbhywMD1ee+M+XuozHg641Vxn0Y9QDYmLy2vWOnd4+uSKzY1dSvkGmdijfz7nO7/yWwTTjNOrP6Ba0Vh/7a9wGR/xg8chf/K0h15pYCgJWRAhKkVSXeLzHz5H877gP//Pf51aU+R8saJlJsSSR6WzyfnjIzrtPIUopryxxgAf254iqgVeXA2JUoFEdJADDXR+XsS54RKmxKRAlmU06jV6A4vrwRRFVxEFgyhMKRVUcrrG+ZGNJKlIaoIU6NiWx/Zug2KuymqeIYo3kRwjV0LVRMoV8LyUOBHICMmICAKRvFJCinQkzSDNpiiyjqIqTFYz7h9sksUqYRAj6zFEAoohEPkpBwdd5otTvDBDRP3FmZilOoqo3BTGWGG5GaCQiRlCliKkAoooIisZK8cDVCQ0MkIkWSFhwcb6W0wnY06vXXJ6gu+HJJJIsaSTJBZng2sUZR8x00gyGT8KUHUFx11xeWkiiAYJCRIRSXIzuHuuzfErBwQBRS7huiaC6BDGcHinSa1WZ7kaI5RjotBHSDUizyUTXBRFxyhUEObZDb/S90kLEoIIxbLI068uyYTXQIjICAgjjSyBTrvIyxcviOUt4lhA9EI0WcZzfQxdIcqWXI97yNktHMslp+dJUhNJEkAIUNQ8omqgihpp4kEqIUkKmmywsVXm1ckxji+g6iWEBFRZI3AVFCXmtQev85MPvyRLE0TVJwkNMlEn8E3eefuAfn9Irz9BNg5I8MhI8W39hnFaL/LpFxdYS4Fmpcxf/I1fw3Ec1rttAPr9HoahkzM0osjj7//9/5Zmp4Pv+yiSQhC55AslRClDFiRSICpJXE/PWAUSd7ZfJwzP+fToQ957+zew3QmVepNiwSBJQzJRRzFSvjyakKUWlWITP5whCjqet0JxRaRCjljLkCQDMbDQcwKa1mJ785DTkyMUNQ/WDGu2wB1MiaUWcq2I0pQZXC8oVwu0NnS+eHJBsbOJPfqMb3z9DpG2zcmLR6iVAs26wl7a4vPTVwhpQGjZaGJGvbnGaHkMvkanvsf4KqZYklmYxyhuCVWukEY53nnzW7w6ecKXn12wc3+T9WaZwB1ycTpgsRRplANiNyWTM2zTxLFjJMXgxclLWo1NhtNjBKlDvbPFVrdJFr1NoV6h/TfWsFen/PCH/4xy4TauMyYTmnS38iRnDuP+DOQ2mbIiCX2mPRellifwhuSNFkZ9jen4JYntYakmYnUPUYFup8O4t+LO3hsY8isK1QC1lCeNZGazEZNZStkwcP0Z/cWYQrVBnGRs7O0SWAle7GM6No7nQd+naZRRiDk+66PLElezf8D17AF5SSUzdBDGLOcBm5uHvDx7irWMyBDJ+TpyqpMYCWlgU9LytOrrYBosTZdGc53VYoZnOpQ1HS+22dvrUDWq/PH3/zVvvHubjb13+fijP8GgSndnjen8Ga1aB1URyWIFYoXI9xiMTigoeaSixHI4ZLPTwrVUBM9Dk1TyuoE5WOCYY2qNbdpGh8XUolj5pefJX35D+fL0BaQCRSMjtF1Ojq549uKY8fIVF/1znr3o8firlyxXI9ypzOcfXDOcLLHtEClQGZxMUNIiR5eXSDI8fnbMP/7H/4SlbXFyfQmlGZ989iOkuEheLJCTba7Or4kFjzfe/SaDyYR3v73N3t0q3Y0cO1ub/OrX/xrFYpHpfMnb7zxEFhqICgiyjxubSPmEpTfgxx//j/TmX9JqNegPn/Czz/6YuXmN65kI8oqLk1Nq5QqFvM5ifk0hp2AYM9r1OhvdPQ4P7lDSa9w+2ERVfV57/S5JZtJpbrFazmi3ClSqdyiWajj+NQvriFypQCIUUHMSRlGj3iqhagVyBYM4k0hFidPrZ8ztOStnyGh+QaPVRUgVFpOIwPZplXe5+Hybrz4U2Nheo5xv8v7bv4aKzmqeIEgqklyiVKyzms0o5wuIUZF8TkQSMtbW9sgVWwxHEe996wFqRWBl9aiW6zTXGgwXT+n3TsnXGswsF0Wus1rarG2s40UCXuxQ6+RZ2zmg3b2FoKjY8ZA33t0jr5Qp1g75o48+4OnJh3RbbVbmnFqzQLPb5P691zH0AuViibffeYC5jMhXYpx0zJdHp8h5nfFqTLkt4iZzvGiMJKpMBiZJaEO64snjf8eHH35IriCRZWUUsYlRLLN30GDcmzO4Cjl6dY4ihAiSTG84RZAN+sMxqaCTiDd6LCee0Gw30LQ21eYOqi7Tqu4gZiGKWuRi9pyhbZLKAmLOxB3YFHMKOaVErbHDfDJke6dLXqsTmCvymYAYjHDGF6R2xqunL7GCKW21hG1ZTLwp40GfWW9JImUIYZ5aWefVxVMCMcT0E+RiA7WSpz/poec0OnsHXA0vqVc0qq0iaSYiChmSLDLtX+MtLrm1+zrFms4nzz5H+rlZqLjfQVECgrnI7YM3qGwKuMkEJS9yvTjBs0pUizkUAz569gPcaMnR9RX/zT/8H/h//qMBP3wxQi/nQUrIdAOpbPD4y895+if/HV+/f8r/47/8X9BsgDe5op1vE2YSjUKeLHZQKjkyZcmn5nMyvUxdyNh/8CbDMG
F44YGU4vsiru/h+Q5BGOElGakUkWYJiqiTpnlyTYvJ5Asca0lBNYjkiCQzKZZyJILN1fUlruCTw0DUBSQlIp9kHL16zsqNkMWbwTRKgMSns96gPzIRYh9NUchQMVcr9g82UIw6X3zyinyhQRykhKJHEqmI8pLh7ByBCqIskMjJjUFGFdAMlcVcvLkGT6RfPDd4mgwxDgmdgGfHKwRJIbFSxFDBTzzyssrtBx2CZQEQidIAQchYWRbdusHF2ScMF1NEVSVLLWRk3ESgmtM4uzwjSfIgKGRKQEGVCR2RSiHD8qccn1yRoaJkCbJkICIhKyIrc4qer4AkI4kuspAiSwauvWR/p8zQWeAmEVmiEjgr0jjDDl26jRLt/V0mk4gocgg8/xcmG8u5RiuoqMVNothDSFLE0CBTYogSZvNrMvXGPS+EIYgCYRaRZiJRssRZWSRxiyiIETUJPzCR4xvVYaOaY+bbOMuEIHOQBJlELpIkCaoe0qwbpOImQZaiShkiMUkMUipiaCn98TkvjycoqoqYaQhJiJJECJlIImdcXc5JE5FEjBFi8KKUBI/E15lbC8LAYDq55q/85d9ifX2dQiFPq9VENxQEUmrVMt3uOv/23/whX528pJYzkFKZTJEQM51quYIgSjhhBAhki/Dn212f0ewJo/mSnZ0NrMWEdnObjb0OulZEVFVWyZLr3gmhO8VJLXQ9j5SVcJwyV+fXvLzqcXl9SaVeoH89YByrpKpDKiz57POfsnJNrievmK3mhKKEKfpUWgV8Z8irp6/QCzpDx+LJqzEYIUt3yPZem6Pzc+Y9m/W1A4bDx1yPUyaBgKSEKGqBwzuHtFu38RKLNCkTBSrnl88YmhdcTca4QCpX8GODif2cTz/vsbX7Ondff8hoYDIYOFwN+whJARmH/pVNpdRFKao4iYcdgmRk3D54mywu093ocnbV5/4bv8Gd19+ms7FHLFpcjn7IdPEMJVsnDl3K1S26mxUcT+LW/Td58OAb3LtVx1xes3A9qi2DMPZwXIHYSYgWVzRydRShhp6VmUz7ZIGHb3nIasRosGLncA87SBmPljiLgFatysZ6BzfMCNIIRQXiImtrdygVczhBjG4ITOYzFtMMa2GytK+4uHjF7OoVYhGi1YR+7yVJtGA5PCYwBQajKY+Ov0Azttm7s4GhNdg4vIeey5geDVmsXGLBI5rmqbQS5tM+sq/SbXaRZZliu4UqZKRTl2lvgWDIaJmPO8xY377Fy8vnDOdTGrkm660qiuSCKSDGKePVBc7E5My85HT6JQVdZdwbIeQDfDnF9j0kOeXTTx6Dl7u57UxU7r+2zeDa+6UHyl96Q7lcgq4uECSDavU2V/M/5dGnP0OOq3zve1WSVOXgcBfTmpLJsDSXTOYuQlag2eyiqi56SWL+wuPjyQdU9C7vvP0GpVqVN9+4xY8//jO++b0SoS9QKlVZziOqusNsBFOnhx9YXF0q7B00kdUZWZDj1v5dJC2l3mjx8uiSes1geuWhGyJL5zleWEGRNR6+vkV/eIzpXKLpEpraRBAFVt4FSSzRbb0GokOzXGE8i5hbYyoVFb2gk2Qmg2mPr731DbzAZzZ9yUcf/whN3mXlPSZNM5LUJfQCZHmJHh+gKnl0WSGnWfiOTf+yT7nQ4PTkmmIlJYoiBr1janUFWaggiyLVTpO8usfF7AtEySQy79LrZYiKyRtfK1Iu7aBKOR598QF7hy1MZ4Zj2yhijjQOaHd0+r0RipEjnzcYjR1WlkeuLOGlX5KmbxN6HoICOTWH42SsdXdv3OWuiiCImPaIXF4iiBbIqotlRpTLVUzznOU8QlQTNg/vkMgxcipQ1gxu7bZpbWxgz33SNEY3JCarExBV6t0qL65/SOJ1yTCYLpesra3jJx/y6PkzDne+Sxw2mc5fcvvWa3z1xSmGkeKHDn4UUq4INIw2y8UcXclRrWyxNBVM7yWRb1PQ2mgFjZkzhFCgViuSxgsUWb1RYI4zfM9m/+AAy/YQjQgrnJCFJSRXY2FeIQoqhbiErEhIUpNWq8tgcIR5foW1lGg2tiiVS1yc22g5D0kOmS5DcpUqopQwmSyIRZeFq1IuLggCEyOrstdd5/j6BK3a4Kura9a6bXIljZXlkKGjKAsqtQKaLrK+vk0upzMIhywmc8yX52x4HrqukdPziGlGsV5laV/z5u1vcKFs0e20+N77e4QLm273EDf4jMGoh+0skPEpYuBHGdUayMk6Lx9fslgYHNsqp2OIdIVmS0UXCqSqhj8PODr7Ke70Kb/1rfv8zv/hP2Mwesm/+OD7bK91qMVlCC08LOyxQVQO8ROflVdmrVbh+OQFzVoRxR0xn84YTYYIahnHnqFqBQhDsjgjiULiSEJRcsSJQ7kIchpzcnzM0oqptYuICqyciGa1y2a3he8JuG6AqifEkUYYJDQ7GoKoYFo2+XydTAhIAhFDA7LwRo6gCHhBgqqrIER8/a23eP877/Pff/+/YMmUWMyjZTGZFBJ4LqXiDlFokqU37fM4jpEUHze8ZjgxkZU6afbvW96yLOG5CbqWcOd+h6++TPBln4pSwBFD7DBkTXM5Pp3gkuKnMXKi/zybmZFlEa3mGh9+OkVIJAQEZFkk8gNmiwnPn8/wrC007ebMEMWM0AnY3MgjSQppXAYRFEXBsW2SxCOnZLz3fo3PPn2BF4D2c05wGkWoQoYqZjz6/DlBoKKRIMk6ggRJlpIvKPR6Q1YLkOQcSeKTiRGqaBB6No+/+BG2extVVUnTmCQRSNKMFI+1zRKWaeO5N7nIOIJ8vsh0OuZgO0etHnB67IJcIwpDZERETSBceRiahqbmcaMIRU0QUYnDkDiWUGUd25kxnTiIUoIsy2RJjCHl8eyQrYMyS/cZpjsil9siTVOkn7vpq1UZUVxieSYZOlmWEWcimipj2ibtcoFbdzb5/r/9irVOh7/8P/mLWI5DrVYjCALiMGJrawtBEAjDkH/yT/4JBaNEkMQ3iwshwXYtNjfvI54MbopKcUKsRFSrVWJ/wcHuPld9i2KliCInNGrr9CeX5KUauqrgJ0sEoUukWeTyOr3+kHpD5nrykl/93l/gcvAp1xcDZK8Gok7gn9FulbkYDqjk15FVASGp0Z/3iIQ53c1vkK95JPkygQ960UA24erqko31Q6oCSIKCJYhoah1RGLBcTMmXLhmYCXJSo7VVY+65jMw5shCznA4pF1UUrcCd9jqvjp/QvzCJwufc3nmf7sYBP/zhD4miPYqVNt3OBvlySO/LPu1ODtvVqdc6rOwTSuI+putTaQakcZFipcF1/0OOjm1Wqxn//J//n9is51AjgdXSJVUXrGwRqWKgNRpsuDkur0Zs39Z5+uyMglFBln0q5Rqj8YL6epfF6QlvPfwm08U5hCIFXSOrgdFokZNixETHiUYsl0vCMEGK1qkUY7QsAFnACVeMhw5xnLKxscH11QXWykXXJKYThUazxMVZj9u7r+O1Ftza/HXajXVyBQFRkOlfXtFYKxGENsePjjgf2pQMBzm3jSqGXI6O0JYa7777Nnmxh
TMbUS0GNNs1FmYPQc8RJDHdehNzPqJWOeT29gPa7SrO2MUPYlJRorupMh0NiWSJUkHn3UaD5dgm3olQcxm+V8aMJ9SbeYoiyFoHbTomE1QCJQeVFDsJKTVLDF6e0dC3eOc336R3PUbNGyRKyuXVkun08pceKH/pDeW0N2J0OeX5l8dc9Z5iqF1u3crTqdZYXtnkciVePD+i1z+l1x8wm3m4XkKunMdJLkkTEy8es9nVWWvssLvZ4Yd/8pRXj86Z9DKMbI+iUSKK5sRxzHQyZ3t3hzfe20JWNQ727/O97/wWewdd5uMScVTADnv0+n1SMaCz3mA4e0WaZQSBSJZlZCSsr+1gmwmV4gZRHNJZbyBrcH4+J0lbaHodrWTiJjO+fPk5qiFg2itSYmYrm+lqQrPTZHDlcnF+QuD7CGLKePk5Ob3B9vpbRJGCIRWoG/d4/eFdcjkdGY1iWWaz8z7FQoPnz59TrRXo90ckUcbe7jaKKGCZQ+7evsPOxtt4jkiz2uberbdJ05gwvlERXp1HCGnEdDxCUVPSzCdwVVqNXRRZQMx8POvGn9zpqjQbaxQKBcJ0yXD6EjcIUfU8T756SpKFNzaNzGU4PQFlxeX1UxzXZLma4voLTq8fkS/oFAsNFpMlhtJkd7dJXswTrmyuL/rM/SFfXn+AE8Q8+vgLpqsek2UfxZBwXJ+jkydEqYSmHuILS8aLE8LEY2kPKBY2uH3wqwwnZ1z0vyBXVPnjP/1XmMEZXjZm6fXQCjqaUaRQzFPKbSKJGqIUszKHlAv3UKQ1pJxLqlkkokYop0ztAQtvzNIxyVc1vHSKUJjy6OkzLvonnF1d8uXzZ6z8a6b2CREJK9PDyBVZmAsSaYVRqGJU8gwWlxQqOdRCQLGc5/nRZ1jRNS8vnnDa6+NlGmYEWU6l2Mnx6uVnxImGmVWJ6+scuQs69/eZuSsqlQqZkFAo1dnY3EFUHXqjF5jWnE5rl/nCxLQFtg43McOA9b3byJqCG7lYgcs02OfTRxVa7e/y4pnN42djfv+PfsbHH33Kv/mXv8fL/hM2d1tIik65LVJuVpksfBZuyh/+eM6//MGKf/GzlD87afJ8UkDRaggBHL865cvPH/Px/+8PWDz/A+53J/xv/3ff4mvf2+Or/oKXp8c8yEt0DXAEhWenPaJyipZTkL2MfGSiyTpZEpLXDSaTIdfDM/LlOqalATdbyNCXcf0I3w1IAhdZVAjjjExQyJKEW1s7WAsVJB1RAbIYCYNKLocmSKxcizSQSFPQdIGcIdJolhgOUjzfBSFFkA0iL0GOfczlmMnYIxUiRCkmS0OEGM6unvPoi3+N4xTwnBQimyAVSfyMr3/zIYPRnIwYSc5u1JCZQuDZ2P6KKFWQRIUslX/xiKJIGCQcHuxSr1UYjW0UNOIsRidP7MrcuVXhG3c1xFVALn8DdyeTsVcOO1tddnZ2CH0NSRaI45Q4i0jjgHq5wPvfeJfY04hCD1Lh58MUCKLDg/tvcHXlIogJtusSZRm6ISNkJpa1Yr4IULQGXuihGi5JHJJXY9bbFbLEwPVj4jTGdH1SISUKEiTB5vL8Ck0tkQn8PJ4gM59Oee3OLTQZxqOINI0RpexGSxkJWOaMJFtSKFbxvQSyG5aoHy0RBIFaqYGUxSSx9/PcpkRKRizEhJ5PrSTgOhFWKKJrOTIkUhyENCVnJEwX50SBTD5XIc1iBEFAVVVIIyTZ5/zyS6JUIhUUZFlGEHUk0SCOQ4pliWKhgipXUSWJNE0J4xvLkR9NefbiI14cHfG7v/vXMQwDWZbRVBWyBEEQSJKEarXKBx98wGePvqBRK+PHCUkqQWYgyjIb6y0ERNJEIBMEMkGCtMTBzmv4logiQSYIuL7Oo+c/YTI1CROP2WrMfOJyfvkMQRI4P/Wp1LZwwwjUBZ9+8TErS6C1to9kGEiqQZrIXF+beK6CG1kEqcjurQNKlS5afoO5d8HzV8eYVoSiS3z17IjpwKVUKNCo5RkOh1imQ3ezxVenP8V2i9y/9x1Ggx6+aWKt5gyvTLI0RELBtm06zVts7uxSzHXQ0nXuH/4av/Vb3+Xw4C6muSAO8nzta1/jwYO3uHtvjzv395jNV6h6dvM/pNrEscJa9zbz1QXrayWIRcxlyrMXn5IRsZh5bHRfA1llGWUsZAVpe4/267+GUN+lvn4bXe5wdPQCXStyeTLCsX1AxNBvCqj2SmA6Sfj2O79B//oppu2QCTKxCLmaQrWssloE9Prn2I5FEATYTo/p8BrXipmORxCJfPX4K0I/pdkoY5sWsqSzublOoaggqBZkGof7GxR0Dc+M0dUya3s6pcotJguT//a//q/44pMZSRLx0Ue/T6FSodqqIegh9VqXr717hzdee50g0ji+fI4TWUiKztngCENJUfOwcBSEUo72nQpZUSJXLnL26pwX/QGhlHCxeM75aIWiFLGXzxkNJ1wHMrqRsVxmqLKAvRLZfW0HpaDhLBSSwCcIBBqdNXwlI1El8nKRllrltf3bqFHAyfnJTWyDFVfDEwaTORvbt37pgfKX3lAWlRpbG4cYd2Ry+RY/+emP6Gx26X+5pK4c8NFPP+Hg1jovXyz5q3/1V7A9n+FgSmuthO8XKWopekvg6PExmiGRZha/8usb7LV3UaUclWLEYlZCjuvImcCD+7u8fHLG9m0VUQhZLBacrz7iwcPb3Lv7OnPrJZpuEGQTPv7if8AwGuRLKktXwPHGNKvbuLZAv3/F9m4Ld1Gi7xwhpApr7S2W01OKpZsXx2RqEUQ+sqyQJAnd9sZN3sDYZDyekYQ51HiMZugEQUauovDNh1/DXsnIokI+l2Ozs8Fnj/6MmhQhqgFeJIKkkKuGPPnoFXvbtzg5f4wsqMwXI2I/Y727T62yIg0lRvNrWo1NGvV9gmDF7k6Hi/MepfIarZaMquUolgxWToZh1FGMEZf9l0hZDteG0FsRhDGtjT2+fHyEltcY9m10vYC18rgevGJjcxsvnDAYXpDLKwiSh5Gr43rge2MqTY3Li2tyeZ3r8JjQE9EkcKwq7XwZQ4rQi0UiQWC+mHFweMjl1TF6RWU0WlHr2Fz0ztlav0cQZTw7+jG1RpnIb1Eo2URxwHg6pFjReHX+kkIxx9wekvZ3abXXWKyuWFoyoqxgOw6yMiNOfJqVA07OXjGan6LnDOLEo91ps7SWhPESMVUwqhmrkU2rWsAJ+xxfDChVqhydXlIql1E8mXK1QxgUMS2fTBBxXRdNLnJ0coSQxfhBxtHxjzi8vYaoFDD9OWY/xLRC9h9UOb94xe7+LSZ9n1BaYtoT7BU05BqVWovR7AKUjNOrc/b2X2c2m5NmAWqujJ2uiAKfLJUZjQc0mx1+8MmfsrO9R6VSp3feR5ZSCtUOk+MhYRSQAVEY8v/6f/8DPhEy/i//1/8KQ5XJVUrsHXTBdagWanzwt39ATiiQSjJxqOEFLoIgkaQGiVokkWz0vEjqymhRQOKbSHLG4Z02Ww91dre6tNo1MHym0zlPegPuVjfZrBZZrBJqqUJJtti5vU0oZcwnPUS1wp39
Nzm+uqa/6NNp1KmUy5hiwjBOcd0YLSfihz5SlpIKEmEQokgyYegjqBJxJFPKFbh92OH8eEwkBMSZgpAKhL7DW6/dxhx7nF0P0bQmy2lIc62CmIkUcgYX1xaBJ+DYIXIGBDGv3W5TKMpM5jMUXSfJIhRRRkolDu41cf0xbgD1bgV/HpCoGTIZjVqZTz5w0PN1gtAmS0WSRKfdqZMvlFksrhCVJppS+veHYuaRRAG2mSGxDukUQVTJhBAhTcmUjEr0jK/+zU9xnN8kkGuI+GTZDbOwWhN59uIpspxDklIyISNDwFyu+E9/9zsUjGvM1ZDNeoMoShAEhTRZQWZx1b9ClEtkxMQxqJrKcm6xs36PfD7HaPSUTLAQBBmEArY9o1qqIRAwm4ekgogbuCjIeL4PUcZ6u0Cl0uDpU5tEsFFknSwTCMMVulbmcP81njwPSeXsZoMXJ8hajVopz95uk88/N0kyFV0JETIFSZHw/QlpumA1n+NZZcLYR0YmQyTJMhRRwlpcEwcNUOukUUoYRui6zGrhsr9XQdMter0ZWVpCUlJiP0IVCuhKxp1bh9QrHubqOYohkKUmoqyiyBqSmKHoPtbIwgsictUMIZMQRJk4CNl7rYOgBDSbDX7j17+D6wSosoIkiViWRRyEdDod6vU6f+fv/B2KxSKBn4IMqiQgJRKyYrCxuY6QfYmmaZBlCLFJ6K9hOhoL54ze+JpN9S6e5SIpeXZ29hhc9mg3c7heh3v372GFz6g2ayCtMKcyppWys6fz9Ktr3n//W9jmguFoiKZplCo5ag2d49MvsXwXSU+ZzCbkcmvMxid0mwe43jWjfsDe1iFZ3aFUuM101qfcbCIqKqv5jLu3mnz6Z5/z3nu/wuHh2wx7xxSrCYlnU5Fq2IlLvnQXN16SK7S5OH/GWnubWr3F1eUQXa9y75sHDEaXCFINqQDH518yHWbUmwWKpSZu4GDkK8xWI2R1D1mOubqYout5NF3m9t4tPvjJZxRLKtOxS2d9n0H/mIPD+zx99ox2Z53Nbofr01OqpTaIPqenX3H//tskwQDPjTFNkzBMKZUKFAsKxycvueq/oFDoUsmX+fz5Be994z2mlzMWlk2zrjOcmLzz1re4OjvBDfpMBxHf/fb3OD96wl/57ncplje5nj5n5ljkjTz1epUgMsmnXYqlGCmTsRYW1apGf/ozPvr7V3zrm7/Dj3/8z7h1b43PP/4Jf+Ev/d/5m/9Lmx98+AGTWYaBwIPX2pxezpgsxjiBSLWWMjFFjNYuhZxISfRQBIinAmf95yRhgK6nNCpldDFPoxYiBwm3ahv0e+dUt+s8fT5Fbdoga6T2irXqGvOVj14RyMIJhAb7hxtc9y9pd2t01po43gylUGWv0cCcj5kJEYaU5+7tN/jqq2eYQYKWF1l6lwzm4X/4gXIwvca2Anwn5up6zJ3X1kjSAp3dEt//wU/4i//RWzSa6wRRhhM4CJJAqaZydnFEuaojCRKhKXDv7jYCbd69/z5nlxek7ozp4oj7D+9xeRqw8nNIik8YqGzuV9jc3GEyuKZca1AwZBRZwxFOee31u8zH4AZLRCWmUtUIo4BCvs5175Jbe1Wy1CKOJJrltzkafsHdw3ssFjGevcQyp5TrCp4jkkQqtifxxuvvc3z0ioU5oFSp8+rlEY7j4FkyrYaEHVTY2buF5c6YjzVK5Qwpk3A9m9H4klq9gO9KCEKVOLtmPI1QxCr37t4miR2qFYNKcYdirkDv6powSChWisSuQpQOsJ0yi9mYWsPAdVckBIiygGtrDPoKlbrIbDFnvHBQdRlBVFEUjcV0QqWmk0NjOFmhl0KSxMO252SCTU6vMp706LT2CDwol26+LxYrXF84qJpOEPU4eZVysP+AIIy4vu4jyh75Wp7JtI87cciVijjBHEXXwFE4Pn1OpZpn1I/QDJvhMOT1h9/j1YsjCkWNNPaZT5ckwQqtkGHZHpqmIaYF6g2QxByq0EAvmLhWRqN+CECzvkNv8AI97zIehIzCa0p1ENM6gugioDKdWDhOjOulqPoQy72xapyfXHFr7yGuIoJsstXZZjKb01nfYTkPWeu2uew9xvEyKsUdFKlAlNhUa01Wzgl6UeTVyStE2WOyMrm99z6TvkO+GdFs7XF9OWNr85DJdAgZ1OsGs/kx6wdfx7YmJOns5tPd1RmKUqVUKzN3j0mWArMgolDJ2N24zXTZp9vaIMsyxpMrKmURQ6mRxBnT2QhNU/D9EFmROLxT5/nllHKtQJC4ROGKRz+9olXf4tf/s7f46wfvs1wIXM+fkXoxg1FKt3Sf1fgrBP0mGjBcjKl3oNmos7++T7OUY3fnDZ68OmK2vCZNJCIrolqsIgseUbQAJaGytoWFzNXoCCdWKRcN0CUWssWH11PuNt8mEFOc5QpdNphGI1ZejJ/aqNkmouCSiTFZFkMWEscyiqSiqBG+F6DnMpLU53o4QDNkZDFPQoSsZDRbMtOJxXKlIuaW+IEGNNA1iVx5xtXnp8RZQoZLSkIUuGSpAmILP1RQciJRlCKkP9fxJS5mWCcInxN5EpEgQhBQb2oALJfiTQYwixEUmcByyJcFvNAHKQdCShr/+8NVUQAyPG/IfJ4niETknEyCSxJahHGEsbbPbvNrmJ9fUpZF4uzGhiMKGaE/JdQyBEFGJCOIY3J5A1WUWC0uKVcKyOrNlbUoJgR+xHjW5z/53Xd5dX7M3PTQmi1i3yPyQYxVEumMTNskEwQkUUSXb4YcPV/CT/s4dpGlGROkAloUIYoiQZAgp6ALAe4KfDckV9GJo5AgTvG8gFa7gpYTsJ1zclqBMA5ueMCrJTv1EmkiMRhZQJ00jW+ykpmCJIgQ+0iZAUkJI3djH1JkncCNEdOU7a0up2cRWZYgiAK6ISBJOkIaUCxo1OrrCOICxIQ4iDEMDUEQCHyPcknDcSKiSKbYECHMkYkSnhOhCgq3Dg54+WyAphcRM5k48hBUkSSKCf0VJydL/qe/85/Q7bZZzpagCPj+TTN+e3ubWq3G7/3e7/Ho8WPW1tYxTRNJUm5a7nFKq2swXzzFdW1UTUdIM4TUQMkl2JFFsdZAt0SCwAN5gqp1GPZOCYMAsnUEyaSU38JOTimWQ4SsxOZ2juHQ4uJ8yN7+Ol89+5hGrcnGzjqriYsoS8xXQ3Stwa39BwyG1+TzAp9+/GMOdg6ZTc+J45hyvk4YevhxiJFLmC6WBE6EW/FR82XkwMDQHvPVpz/k7tcPsbIZ4WSfvd1NHGfMWneLXm9Jp1Pm6PEJzcoWk4HF2kYV+8pHkDMG4xNcJ0XUfC6ffkaaKBTKW/i+yu1bD3jx8hnmIqOzVkTWFwjLAggxpjWjU9zE81ckkcL+3bvMl9eouTrlusvFZR9DK3LdO2NlukjFIufTMc2aiO97JHHAYjFhY2OLKFKRJQEt5xIFC5arJbXyQ7RCyKPPPyFE4+Wzl9QNhbn3imzZQFJKvDh5hUEOLReyXirxo5/8hL32NpOhy49/9FMO77Yg01Bkg7xR5vq6T6WwRhoHXFzM0fQURaih6WMMVeW
P//AfYTkCh7c2qZUVXpz9PusVg9fv7SAUWqjekunkc2arOaK6zp2DIuNxn1yziqSFzAYxabGGlFkIyRUHeya3tt7k5dOvKJQkJvOYbnmLainj0aMnJFmOsSXQbNUoNH2seUDZaONlKUfDGfd39tBTjVa9wqvRC3puyjt7bZLURVdivMDl0bMxSOCoKbrswKJIpa4zdyNc3yEOfTJR+w8/UFYrZVqlEnldZX/fYLGU0dhgZz+l3ta5tXfAeOjwznt3mM8DZpMVdjBAU4vMFkd0u10mkwStpnB7t8uzo8csViatRgTZnJcXCaWGSF2QmIxDjnsvaa57eJlGqZ7DT86olLe4Gh7TbKl88OEPKZZkAl+nUGywWqTkjBpFtUCrtqQ/uOZw702alQNcf0a+oDK4XtHdWCcIl7z2xm2KhRq96wGiHLBTvsuoPyD1ZWrlEr4v02w2+O63fptqucBPP/6UWFhw1btma3OXfv+S696Cva3b+EGA4DfxowGx7yAIAoaRcfTlhNt38pQMibPeCFVVWS1HLGcLtnc7qKLGoD9BFmNqrXVmsz4RLv2egGic0Gk+5PzqgkSO8YUpJ5cxRkHBtFx0oYvtjNm6u8ve9us8P3pCoSQyHbtoGkSBRpKZqLKOJBZQlJgXL16wt7+N4/kYRp7AE4ljmYV5Qa2R48HmWwjyioJYoVzaZDY/RRZlgkBGrcHYnSCmOmpcQdYWOMEUIWlT1kwCScU2fWzbxnUVktgnl6/i2BFhMkYJdxDlOXFYwrWqeEHKdDzhrbcfcnZ5Tbe1TqmsMRxP6PUu0NQizlKi3WoznZ5SqnWwVx6VqoFlXZLP16nWughSmdAVmDsjdHmbNKpyej5la3sDJJlitoasFJCyEhsbKf3+FYa6xtbmHvP5FD+aIiCysAYIWYNG3SDLBFbmmDj0UOUKC+cZaA26nRa2OSdLLVQZFouYg90dFAnmvQu8cIEg5ek0vkGr0WS5GBAvbWpyG1s2aTfrxHFMmowo6iVK6+D7OWIGpJlIrdGhd2Vxa/8+afYhmpYDMWXzsIb3bEApL5AlRVRNJMvFNNfbiMUZSVyiUdGodkrc3vgmprjio599ym/+9u8wWI7JhBH5eJuJvyCTZJaLMXlF4yeffwFyRF0tUTJKXPUcTq2XGGqVVXWdDUMlng05XVpsdtfQ8kWs5QxDEKnIAsfXlxwPI2IRomRBvi6zmWvz6vEYx88wBI8k8BGMEnlZZmpdU9DrJElKFAfYdsiDB7dY7/4Kp1d/iEQRMZNAzsgy2D9sM5xcYfkOrVqDyJOw7ZicrpElMpNZiqoUCQMw1JQwiNnZXuf8sk8Ug5LESKqEkIGhZniRz2Wvh1ao3NhSRPBTDdu+YmX1SVFAFIgjgZxRIApd8obEgwfv8od/8CNyhkaSOL84E9NUxHU9fvWv3mOxOiVINJQ0JM0kVF3EmEVcL0q8nCyJ5DxREIIkIMoSYpKxvl7n4qpPGKfImoYoZgiZgCRAsSjw04++olCp43kBWRah6HlkJcOyl/hBghO66IJAlsYoWh4vDBClKd//wR+xmP8lVD1HnLiIGNjeNfWKROD5zOc+opgncAOUnMxyYbFXr/Dm63v8y9/7KbK2QZqJpLGJrBkoosYbrx/y4sWfsrBt9EqdNLlBBjmuS+feDoETEcUqYSyixtINjigJSdKQ2XRBuWAQE5MmEbKo4AceulFAUxKq9QqLRyNk6caOlAgBciqSRD7lUoXx2MQNxBsXvJIjSV3SMKZULFOuwPHpCbKSkWUeoiAjq+DaLs0mfPHkR7w6uUbW7qPIIhEKgpAQxTb12iat+j7vvP0dFosFmZiiKQaaqqBpyo1YAPi7f/fvoRoGjheQCdrN75FTkgwO9g64//AQ6fdfIckSYeiC1GJt7ZD5YkIcZGSpQ7FYon+uUWprmDOTZqvEYHBNGkr0rl8SCgaGLhGrA85evSKJI7719d9hPLlCyjrIqonjLjD0HIaqsXQ0SlUdRZMpl4ssrRmvv3mLKBAplppYJqC6LLwljqkQZn3knEq9UcLzLPL5IlJWpNy4TeilnJ/Z3Ln7LdLQRyklkFW57Dk0Whm9ozlrzTzXvS/QjFvMp2PiyKVT75CmMb2rPu9945Crsz7Fqk6QDBldKXztGw9pNBo06zrL5RI/tIkyC8PIM7OuYOKRskm10kBVVcIgQ8tcyjmdRqGO780I/IDUi1AVnbdv3+aif8x8MuVcvKDVbuAEV8zmGpVyAcsMuTo+RZZjirUNDGOdjbWM9XYFX4gYmz2iNEEQZZD8G1OOqqMIGuWKxM5hlWcvPscLQmrddUqdKqPTU6p6nVcnj2nVmiwW1wShQLXeRtdVAjckSTLSzCWXD7j93ntUy33sscuw/wnnn6YoZZVo9Zjrk2t2t7ssI2hUypBlTAYuqW6hqDmKUpNYyCjny3gLgXr5Fn52wRvvF/nzP5ojVAqcTE9gITOjwNfff5fh6Rn1ShVFFxDlGNF3mZke5bKC54/Rci1MVgh5GSEVsLIVvfNLzJVEo1rB8kdkakpZKrOcmiTRJd3mAYJ9Regm3L59h9F4/EsPlL90hrLbqVAqlbh18Aa397/NX/ntX0ORVOIowLFtBtMjjJKPaUaMJ1fMlucYSoU0Ubg8Vsnn7mHPfUSxwGnvp8Tyis2dIp+/eEmpUmSyfEXMjF5vQhDIPHztFv0rlz/8g/+RRitG08FLriiWVIaDCQIFlPQeurLG5cWA41fPue4/QVQXrK3XuXvra0wnDo+ffUh/+gVR5lIol8iATncL309/nssDaxXjBXNKpRKbW03azXUq5RZvvvkWgZ8xGo3Y2NqkUm6ys32IY7lkqUexVGNm93H8CCc7QS/kcaMVg9GIxSJALy2ZTS+ZTIc4psT1eR9JNdELDr3Bc07PzxjPl4xX1/T7A1SlyM7OHf7sJ98nVJ4yd59juXNsd4Wk6UhSkdH0iqX9kvHsiBSXpy8/4vmrT9BzMVEUgxBQra7z3te+TaVaZzDqkyslaJpGJvh4vkWWSixXI6p1nXanRrmmYORrDGdf4YYTFuYI2z+nVJMplIqoeRXX81EKDcb+jPK6S9koslY+ZDa9JFZkfA92du+wMAeUGy4xHpmgsb61iSi0STOFjc4brOwRTnBOvhRQbkaY1pzdrbcJogWW5RDYKlkcYdnXRPGEOJtSKFYplKSbTOdihSTk8PwIy1liWRGZJNJsHTC3Lqm0Fdpru5h2iuPmCH2BcnGNQqHEdLKk1Vyn02ljWnPu3b9DBiiajmkuEQWF8WjJdDLEUBq0m2ucXv+Aja0SxVKMyIrt9TWcpYMgpOwfbHB1PUJVW5SbTTZ37qPrJUaTV7w6/Zj5fEwiKdiRg1TQ6M9GBBn8+Z+fECUh1eouk9kJilSgWb/Fo0dHKCpcnpyTxUAqQpJyuFFClAy81CVIbMJohZzT+erFK/Jai8HwnJXtorHBx3/6I9yhiBbl6D0/I54nzF9OuPjyFY2kTbyMyYwKy1Rhal2RhTMEV2UZznAQ+Wt/6W9QkqoYzpjl5YJLJ6WRLyGJPi
+Of8z1YokkFBhczNlqPERspNhKilHYwYrmJGqAWk544903iKIASVBJ44DVwkSUFRIRkGI0SUeXSiSyxdR2CMIGqqiQZhEJAgISc3PJ6dUIpBsPdxRmeKFPq61grSLmC1A1iBOfyE+IYyiUyiCWSUTI0oggdEmJMHIJubLBbOoSpgmKJiOnOZwgZHdji+nymqVlkwoRcFOCkSWB/b09ZuOYOBLwPJcsSX7xSKJKlqVMZucoSgkvSm8QOJmKl0jkCyl7TWhULSLBI81XSH7u60ZI2d3uUCl3cT0LARFJUQj8CF0VWd9o4foZMQKCBLqeIwg9dFlAk1OiQEE3ygRBhCCKjGZzLq5O2Gk+5Ld+9X+FImuEkYuAfFMqsXO8cfcuUqoQhRJxGN1s7OIQzwsQlZDlakwYG4RJSpylyLJM6PtkWUavd0y/N0XTDfzQIwgC0kQiyxKWywFhAH4okAkpZBFZkiKmOoW8wn/0l7/OvXtvYDkykqhBJpDPa3iOS64AkZCRZEVE0hs8lKghyRkIMStzjKFXsCwBJJE0vYkpibKKpMQkTFktQmTJQBYVQCTNRGQhR7GkIwkiitRF129a9qViHkkSUGWRSrHC/s53EGUD13NIs4zFYsF8sWA8HlMul/l7/81/zedfPKJRbxHGCYKUIiKSJBKibLC7uY4TTIiThDBKECUZL13Smx5xcfEcTcuo1wpcXSwZza7ZaX2Xje4eUZjDTwJKxXW2dne46PWJkpj5JKNW3qPT2uP09BjLGZEKc/xoxPW5S6PUIY1kxKyGUSjy4w8+xrQtHFsmV2iDKOEFCvWuyMKcoqg5yg0Rz8/I5Vr4qUihUIB0wXD+lFy9wsNv75Ovx4wGTwhii6veOU+fn5EKNvOlTbXe4vDWezx44x7f/I0mF70jRKHM+sYmkljhna9vc3TU4/6d97Ftl83NLsVqxAc/+Qg/nLJYDUnSkNcffpu9g7tMlwN8L6NaaTEYX1CpaYiCTKGQY+XNOD0b4FgpjVIFJdbYbGyxVl9nMbYIfHj37a+jq3kKhQJB4JJkNuVajsDVyOUL1FtNXOcVo4tratU1VK2KhoprhRjKDqpWI0pCjJzCzt4mfhTz+NEzxuMxe3dbvPHObZSiyVfPnt4sgVYOQiIzGi5BSMgEmdVqgeNatDdzOEFEJsWouSap4PP4MwfbV3j8xYiz6SseHZ/QuzQxitt4WZ6l4/Hs1RNenlyiG0VyukIWKDS7JZaTCYGfYDQdlv6KF8c5vnpWZWpn1Bp7KMmS89PnbNZbmJdHuF6Po+k5zwdHnF2ecWUOMFcz6uUmTmzz2cunDO2U8+GIopzDMwOa7RayJmLaS9a2d/jer/8mq8WSRrPM1955l2alwVuvHfLGwzsYUpHDvQf/4QfKVncDo6YRSRm5eoWTS5O3v/06al5Ez0PgK1RrLQJPRlWKdNt7tJsHFAs6Dx7e4umLZ7QbZcYzFzKNL1/8iC9ffoArLHj+8oze9TlffPoUx7b46stH9K4vWGvsE63WOX0+p9Xa4slnE2aTCM8q0O1W8X2X7nqB3d0t7j/Y4N7t1yCRCXwLSZLY2V1H1QNG4wFhYlOoCkRMOT27YH//Npe9U8azC+r1JqoqQqbhegvqlW3yBZmnT58yW1xSrso02jKt5hqyoBOnK4pllbXNHJOJw2Q55dXVl1yOX2KU6tjJlKXlU64c4EUxo8k1KT3WNxs4poq5uNGyXQ1OKddSxvMzCoWYyWTEydkpd15b4/f/+RWfPP5z9MqQ3f02oXjGwj2i01lHFjUycYLrzUkCjdG4T+C7PP78GetbAtPZiC+e/BDXdXnr7a8hKxqeP2drpw5phqblUFSJMLIAKOYb9PtzUAKmYwdzGeA4Hv3xBRPrU7xoglpUWM6m7G9uMhnMiASHmTMnkyqE6YR2t4S9yBN6OrMJqEqTKFvhRR4bW4dMVycYeofD/e9SrTfpj68oFou4voNtuezv3cNe+fjhgHJJp5Rr4rgmgecyHF3T750TxnMkOSOMUhAFFuYM25uDWKJ/OkIRU3KqzLg/IK+LNEstssQlDWwkxcH1btAVIjlMc8XZ5UtkTaBSbXKw/yaCbEEqUsm3CMMpUiZDUkfTRfJ6Ece9QDNWaHpIkjgMxiMQFVAEpqNLAiuhVWvSbZWQMpHNtUMMtchkZCIFIgfdTYpxlwf7GxiJwODpCbfqu4QDh7xQpaSVKaiwsVYiy0IkMSNLUzbX9+i2yyS+hJppaIKGKCiEts3yPOXW7ibl5hr1VpmdrSar1QDRcKhu5jHlJ6zCGDWfQgFCeUVszUkmAQ92fp3N3b+Ar4MaG5TlHF89/YLiWhNVSMgXDO6017kajXhyvEJRuvhGRLm7R6zHrOw5ReMue402oTpjOC7x4eM6p7N98uUqr919i7fefsCDO5tUK3ks08NzA4LwhkMXBAkeL/k//pf/My7HAwrlG6d2GMLaVhc9l+PLr4YIaelmO6UnXJwP2d3dpd4oMJ+ZFItFAGQhIcsimp0as6WP74cokoqhF3C8lHK1DFHC8ZlLLlcidGPEwhISl1KpwsH+fYJAQpIERCkjzTzi1MIwVL74/BmKrGHoKrKg/OLxvQRNl2i11rg89ZFkDQmNSAhREp1VHDPMWYxHBjV01GiBLAl4nkXOkJnP+8wWFqKSgHBTIswyiTDwcV0XWVJIMhAkiKMMx/PY2dhEUxTMZYSqlW6ynmRIio6gxOysK0yuAiwzJF80EEQZSQkR0oy3XtvjcOcWy5mDpulEfkKaxfhhiKplJFnEbOESij6ZEBGFGWQKhq6RxjYyRXwvRhRvSo9ekCIIApWywmplY1sBgpAQhB6IEUFoEiceUSjQu7KIiEgTCT+MSLgZ3F1/xrOTV4SZhpBxw7cUIAoTFNHg3r2HlKstAj9F00FIUxRFJY5j8gWZIFrimCqyUCcNM2Q1QhAgS0Vyukir1SKJdKIkRNUSssRHlEASZKqVFgf77+H7LpqmIHDzAQGg2+0yn8/523/777C5uYnrB0iySprJaIpKmkUUyjrdbhUvmJFmN4ipOI4p5KqksUZ7vUa+aHDdG7DefZPf/et/k7PRh3jxgv58SCqWeHH9Gf/ff/sP2To4YOw8J0imZLLH2fk1ZxdPiROYTUJkqUK7W0EmJY5tHL/HcDhEyYEgG1TbZaarPnGU4AfXPH3ylN2dO6RJhCKp5DSZ0A2ZjG3CRMZLbPKFCoJkc3k2QBBLpLqBUTNYzR1Cc4BnTXGCiIvFKf/wX/93PLs85c9//JJXx0uK9SbPT48Yr66x/JgoSxksjjBNk/nMQtNFFN1DkQtY1pI48Xnx6qeMRiM212/z2uvv0F1fQxbz6IbC+VmfXC5HEIwQxRL5coVMKqBWBGaOxSockGYC62ubpGnK3n6Z4fCMYr6LWvCZTMc02wXq6x30QoX12gGHmwJaHh69OmVl+oRjl+2dmyvrMLJZWX1OX03x3RUHt9bptDdR4wbXr+Z0qi3yagVFkjEMhc5aiSidkYka8+U15kJkc2MPcxkSpg6j5Qqj2mDaH5PJEkfHI
8RiiK8U6ezVSeWI06sXBGmK54TEkUWYuKRSQhwUWN9u0ZuMWHjXfPr8A64HfY6fHuMvxgSWQLWsomewuf4Wv/bef8y7b90hVXLoSoGDwibyNCEM5ljxAsuyeHT8IdfWHFHImC2GpBgInoPluXz4/BkLy2UZBDiE/OQnP6Ky3mTm2zzuzeknV1wtgZLCKpzhsfgPP1BeDj9l5fV4cvKI88WfM/E+5vj4MQW9SbtV5dadbU5OXyLqS0RZIkuLBKFDLqcDLhv1XVKhwYPb75HFXbrtXYJA4K17b6PmZL7xze9x5/7raDmRRrNDrpBn52CHv/Tb3+H1d25hWxFvvHWbLBVZ2+xQr69TrMcgpLz+2mtI1JHFCqKUsLm2QxKuCD3Y6O6ShEUse0GlXKCgbXJ4uI5lj2m3mxQLNfx4TquwgeNcUV+vMfPGRFlEoVgmV5PwBDg5OUHPxQwmIyRFZrV0mU9NRETy8h63d75D4nc4Pj5jNbOpVnR6vacUSwbtzgMarQNUrYjtLFhr30ER1sgZEpPlEwp6myjS0QoiY/MJt+9u8Zvf+20axl0GV0v+/Cd/xOnoYxKjx2Dc5+J0QWCLqKrLyhrihmNGY48Hr9/m8rKPkY+5HDwhzQQca4oY26RxnjQLsL0ppxefUqu2ycI1/MAiyVy84JpW/RaVeoNSrY5qVIhSh1RUyCQZc7Wg3lIQBYPhaMLKOcPzx4TZBYLscPegyf7hCbWiiSFkuP4VaSYwX51zevGE7fVN5rNLTo6eEIchgRswn9jIyEznL/n0k5/gOQ639+9yftbD913W2vfIkPHjJarUYjqyWE5WxPGIpXlMGM/Q8zHDwZcUaylB4HLdO2d35w6ZuOLFq0cIusXF6BWLxc1L8+Xxx+g5AVUPmEyvuXv4LV68+Ap72UcTWxhGnVCIiFSH8XJOIk6xVworx+b4csnnT2ZUWi3COEJVXWQxo3d9jCKJ5HNz5MSnpNUIfBXTW5EKIfl8jKbLeFGNP/zpH9Lc6bBaqNy5u45lebQ281z2vqLT0rB6JrlUh0wgiELSFFQh4/U3NrCWS2Q5T5gFEKsUSwZ/8McfYYkJrnnJB3/ymJkfordsXNNk3PNoa+/w/ltfo1JdY3JyTc3YIkpa7G93EIIZn371BY6aYksRtnjFYPqI5ficUmMNKz5hFSzZfvA2G5tr6CUZ2Rc56r8kYIJSUBBVDTsu8tGzNh8f1bi8EpARUdQUrVAmkvNIRpvWZpODvR2atTy6JiKLEbE94r39X+c33/8NPHtF5As4yU3pgsRGkW0ktUiaeAiZQaKIyImHkGS4FIgyCcMw0CSNMMkQQhkneYaY6aiSTqKIJJGOGCo0m3kWix5ZmpKlIUg+tq8gkqff/4wnn75AVCs3IPQMojijXixx99YdJOXGwS0gk8rhL55ETmkadUQSjmYrjKJKGN04saPUo1PSaKgx/eGKQJVIuFGupolCo1bB0CTOTpYoeokUizQTEMUUIQo4ufwZg2WALCrIaUoieKRRiiwK1CsFLs4uiZAQM//m6loSUWUZOzJZeR6uEBAkGfgRcSSBEhHOb7KqkaIQZQkKIWmUEmUJpjfBc8F0DNIwIPECEkEhUVNC+5KyHDIerdDUHKE7J0UGESLTo1Iqk0gaaVrh/0/bf8Xa3uf3edjz73X1vnYvp5/3vL1wZjjDGYpFFCUqghzAthDLiREguYgFBEGQAAZiIIqgiyAKkiBBbMdKgkRCnALbtExS5HA0hTPz9nPe0/bZva7e/73mYjPULS+YDaz7tTawfuvz//6+n+fxhAQNBd9PyWKVNHT55vwZIz9CVnJSfEQhIUtUPM/jg0d3+ejxY2I/I5NFSGXEJMMNUwoNg7ubGsdPj1E0FSnJEBQNNdcJ4wBdV2mWuyToYIQICoSJhCbYf27piTk7HLFYpUiWRepLCLJNUSwS+yu+/2v/JlniIysCmSghiAmSoqOaKoZe5J//P/5TdDMmyDLSPEERAiQlJk8SdMXGkFWW+ZRQKKAoGnmeEicpK3dOlE559uIZw3FMo1mjP3rOm1dz+sMZuaogqh694QFascKv/frv4Dh9WrUHjMZzEMqYFYPH7z9h6SY0uyWuen2iSOTCveB6PmY297i+vqZSNRmvvuHiYszW9n0arTVWjszduz/g1ZtfUjLbaAosl0t6sy+x7ATXH3N2fIkYKVSLLcLomtWyj6FsMrx2cBMPtVxAMFesvAmlqsGjtxt4yxVLf8Gjtx6zXJ3y/JvPGYwOOTo6QtESTk7P2NvtkucScRqQSgLX/UMkZAwjRLckji4PSfOQ+bzHz3/5hyiajiBLCPqA/uCa7e4nvP1onenomiTVcP0AP5hTqhfp+VcMBwOm8YxZMKNoFwlXOftb91g5Y54fHCIlKTvtLeprJoFo4a+WWIqHaZXZ2t/l4vQE110QL4somUSWxty9s8foTMRQSxyfviT0XKSogMiCIIAgUnCCgHq9jq0bSEmTZl3h+dOnfPn1V1z35rS7m1z1bri66TOd9SjUNCRBpllv0Cq0iYICj57cZbZa0Sxt8PbDfURAlC2Mcs5w7OAEY2RRwLJNKto27z1+gqwrhLHO3t49gmyMlJepdSocnb8gDSIqtXUOLvrcjD3ssk6YxKyWQ5b9Q8LFAtFeslhccXpwwCRxWSwvyDyHUnmB419wdf41shQzc+YYmkkQjAkTCSdcMZ5OWIQeB69v/uoD5XwaI0oGrVYTRZEII4fL/ueMZs+IQoE4EPjg/Xc4fnOKaaaUqzHVagVVU2g0GpTKRbqdGkF0RqOtsd39mDt7T/CXChV7m+GNi63XKVs7GJrOWncXXelyeTGkWq1DrhGEC1zXgVSkUi2x1n5EFCaMRiMUuY5dvmWiaUaBKE0YTW6IM49vf+s77G5/yGqV4gUj4jhnNPBIYh9VkXAWPk4wJIxFotglTwTSQETKdTa6LZYDA1WuMejPWbnXaGqRJFvguisUoYOozBiNRuxs7bDWfkCt2kVWUtI0ZzBY0R9coogWzipENVTc5AZN6yCINqEv0my0GY7PWbhHGFqRs6Nr9u92UBWL82OXne2HdIsfEC5yfHdGtZ7SamxTKq5RrW5SLjWRtRX7d7ssFzmXl9fIskilot2CmsV10njCfBCQJwrt+j6iEhGIZ6zt60SJQa1R5fJyyIunNxy9OSHwlwhJjYZ9D0O2UESJ2WzC51/+iI8/+h7edAMBlXLFQlfa/PSnx3iBRhhWqDRM1ro7tMofYGl1DGuFIFjYhSKaGeA6E0ytSJ6nRKnDcHYNUkwqOTx7/VOQUgb9FIEq9ep9htdw+GZIs9mgVt5hNHTwHZ1m9SHuIiMWzlg5MxrVO2xt7jFfXjCfQLvdJfYlZLGIJNw+USqywWw2Jc1XCGLEs2++4s6jNW4mV6iaSKNYQUk0Ml8hJ6FRfcx0fsFkOqJUaPGdb/8685EGmYGuVlhrP8AwmmSphpjVEdCxTRtDkendHBL5c/a2H/Lg4VuIosj2boPFLMJLV7y5PCJRPE4vJuT5EjHW6K5t0pveIEoyoigRxiGT
Q57daA7Z0VNC1Bva4ghhLN8wkIOpO5S7exJFdJI4oZvGiCQBZnnuHRw1Pqq2WSUkhn0CaX3oNgnYX1F6xWd1ANm/NGm6VvES9kOn2L7vgQw0gQujKpnMPgpEM+VcCxA37+0UuqWxnqKwVERWY+76MERZJmiiBaUq5kEQKRjY0NlnOPwaCHmVB4/vwlu3vX6PeHjIYTAl8gkyzR7jTx/YDt9VVmU5uUXibtR4giBIGPIIhIisrcD9EikEOHzggUNUZzmuhyjkhTkSSZiABVTCCKAQvLQxZV9m5s0+l0ieOYySS6qswMlrh+SCwIjPsz3n2nRjqbwnaXHJ88xVks2LtV4fTEwvOgXN7GXraRliG/9bu/xD/6b56gqjKOE2AvRcyEwkc//YLf/b0byLJAHAaIiIR+gKoZSFKEKJg8f9LkK++XKNdUfvLjT9lY36VcWaHd7TAePebtd98ijiQkJYUopkilz0gndDLJTYazEXdu3adaXKF92UdKxqSTeVqdC2o3thnPB2RSNd5+8+v87KMfkcumaTdH1FeL+N6ShObyzoe/xtOXD7DtiPfe+TpbdYdEXuDFwRc0GjJbW3fZf7lPIhVy/PGEne1bRGkbgVVu3EwRyKf89b/1xi9mYhwJnB73KJfWuGx0ef3qgsmggCAIHL3qspgP8dwI35ORZZEwkAnxyGZ1PP8q7COKAUgBguqgiBGRpyEBoRARBTGxEmO7yy89iQKapyLLApEAXuAjfonwUVM6hpRAiQWCyCcWYuJ4hmsPuLw4w7F9YgHiUEdRFIxEBsMwSWfT5IoZkukcuUwBDBnDuAo1eZ53hSJauiwtl2F/SOBE+NEVg1IUxV8A1hNJA1mWSGomRkIhk8pfnSJK/pfBHx9XmBHMPMJQQMBHFCEWEqTzeWLRRSBJLNiIyIRBiKzLJBIJms0GK+s79PoTNAwERQZRQNauUuqSJKDJEnrCQDM0DF1DFEUWjs9s7vLq1SFbWxvouoqsalgLDx8Xx/eukv2agRgJnB2d89Mf/gUvnz1HICKTSSHq+lUTkSgQxhGoSbY33+PH3/0jzpuH3Lp/k6V9gee6qKaOFqsMey2miSTlUoHu/IizRoeNtSxKMYVIir31BL2exWg0o1zcplpdctk8Zzoa0By4FLN7lPNZbMtm1F+QNEucTZ/QG1TZXPkGCUUgspasrNwhc6vESf812/p9ZmMLI5VAUHyanRZ+nILY4I23djg+FYiiq1NI3xGJoggBj+PDSwJXJV+Q8Swor5dwHBDUGZ99/JBf+6Xv0Lzs0bzsEAQe67ktqltZItnms89O2d3ZRJcC4uWUYiXDM9cnuVpDjQPmswHVjIGqxWTLJWJvRmGzztt7q8T9S1xV5KDdpFZe4eTlEStr18jnDYq1Aq9eXTKe9LAsjxtvbzHsNFhOFyRzGeb2FEHzkZWY1uUZmmFgL0RquRTN1hnrG6skTImTgxmKorO38y6XnS8IQpf+aM5iEHN+eoAQTpgup9QSAmEGLhYl6tUSQtHGcyQ0ucRisWA5l5HlCEUqUsgUWVhNzi8H5IpZ2q0xt3ZuEwsRzw73+fY3P2Q0nGBbIv3JBYJ4k/fe/SoXF5d4tspGrYZrK+x3G/y7v/Er7B/9hHKqQvu8wTtv3+bg/JCLizOCpcOwv2RoL6lkUiSKEv54yXR+QS6vUMxkWCzmdLpjytUUi4lNtZwnk8iyXp5T2Nxk//QZplbGEx2k2ZS1SgXbVDk5O2BlZYdCLWA+aaBSZG/NwJA8zhseJ+fHaMkl3Q5s7q5z6+47f/mCUgiLJFNdTHkdyQgxDAFTXgW9ie8tMc2YlLBFKl1gOJhDOCBhpJlNLUr5HSQhQ6v/EFkpIbo12t0mrre8AqMrJYxAZjS75O339vBcnScvT1iMbVJpg8PHp6xk6iSTOSQxxfbOLpPZnKUbIYgC/WGHretFxkOB80aTldUCP/3p5+xeu81ioZJRQxrNLl7oMVtMCEWH8zONTLpKLI35+7//z/iN3/4OASLjsYOATD6/TkzAsO9jqS1y+Q0m0zSyGVKQVnGdkNlEI1KPMZQyQrLI0vUImGAvbBI5g1Bw8UKX9mUDLwgpVHUMrYBvQONiwp2VbS7PHfrDfbKVFN/93uf0RhdU1zWKxSKCHKEsDazZEimdpFbZw3ZmKM4MVfWZjH1qpRwLu0e5mmI6T3LZPMcJJ3S7AtlcgqOjI0oVk/lsiRan6Z6d8cbde6ila3THLtr1FX788U8YMKJYLZCqw3nnEGMxI5GUUeQ8tVhlubSIBZVisc75ZUwsu7x40WJ7Z52l67AcuLRSfZ69esq9N/YIhQyCkqZeStA8u2RjV6aa3iKXznHRajGd2MSaRdYoo1zO6Q0HJNU6i/kCP5hgKAUiN2ClVEdJegw7Ns5cxrPnaBmd5XROvZZBVMscnZygJQTmHhAlkJSYqf2aSv4OH3/0CXuLddYrq1iBS+PyFZVFFzMh8/TwJdW1GFGRUNwyJwdH2I6Nmc4T+wKyrjKbeciU0ZQEvjejVDbwliLD0YD8VpJapU4YgWtZKJLP3vU9nj06ZDYd4ocesuqSyaYRxJCZ1cWLHATJQdVifB/SGQVNUZAlCddZ8OGH32HxyReIYg9RuKqXCwOL6dBltVxFFQWiYIRiGKzXNlCkJP7PL4mkiFj0r4IykoQuJwg8EzEWSCcSjGYuXqgi+zGEDmEUIgsSmqJSXRX52Sef8s33f5d33vk6pdwG/dERC6fLMnqNqRcwEgXOWvtcv7/Nt3/lHf7FP/sxpWqGwBfQVJ1ef8LTJ+eYpsls4l0BxeOrlpj5fM6wb/GND++jUMGzNCLxCC9YsFb+Dq3+KxTZ5PS0QTZX4OLsgDt37rGxneP4cJ/tzU2EWGRu2bzo7JPP5nj85IesrV0jlUhhzef4ro8oRLx6eUg2VSaREFAUF0ny8O0MSaPC5XkLkxKrm5sIooKghey/uqBer6BpWSbjGWn9FobZR1USuIs0x2f75AtZXh8ekTFv4Vurv5iJR6efYSSgUCigqQlSaRkhFtm7/gbNy2OSWhnLcnj58imCmOaiecJ4ENM6mZPP5el1R8yn4IUWomohiwYJzb1q0pEiNEVAFOMrDmckIIkKkRDhRSGCGCPLMoqm4Ps+8Zdd3r4UECBesThjBVnSkBQFQ48QJYlQXAIRQjTGXg6ZTQPOT0LiQEXEIBZdEonkFe6rWMRIpUmkkmyslkkmtxCiGN/3cYOA6czC+ZKLOhr0WC6XBL6IqhgoKsi6iGmkSZoqyZRBKplHN0CW/Kv0fxggijGrG+uoWgbXWyIpMnF09driMCLwI1zbIZkwSKUy2MGCMI4IgysPa2BbhC7Y8xmu6xLFLoaRoL6yTrVaIlXU2NxdxfV9ohAIHFQ5JGfmCEOffrfNF48+5/z0NQevX+HaHpqmIcsKC8tCViXCKEKRBCLga994g+lgRiZ5FzVtMw/6XK/soWqHX/pOBaSUiyxepe1v3dzis49e0eoKlCoFvKnMqB2i6grN9mtyyRyZRJZMekY2Vebg9ABJnBIGJl88PCdXN
kgnN0kIdzFkmWis4aamLGdDPMWg0XuAEJcwsytUa31sx0dQbBJahvW1Ikenj2h3QiaTDlEoY6oql6cj1tc2WSt9HT2xpNV6jWdHzMYOetZAQMXQkrz7zl16vS6Bo/HOO28xt2Y0Fz2CtMh8OkHSDUJJIlZ0mp1Dfvb4AW+98TaR7KCJGWqlEi+fPSTR30BJG6xkN2l2n/KHf3DE9a0yB4cDnFhAED3u3L/H2toa+/sPefjgGaVqDcfus/RCUt4aln+B4mUoVPO48Zz5wmVjbY12s03G2EWOmnQ6LUrVMpPxEt8LOW8c8da7tzg5e4FtB2zvJOkNTul021Rra1Sqm/Qbz2h5qyyDAo4yYzTuI5EjjHzOzxvcv38XRcoSRHOG/avr21uozIcd7LlHuVpjOp+RTOdxHZGJNURWMwhqm2y+zKOnH3Hr9jrd4RmKsEomKZAz17hxTeL5k0+IRIfLiy5v3L/Pw08+RiulWIwXvHHvGocvztjaeI+bNzN8+vQTJLPEdHmKPUuT1mqMp49JprYYj210qU4iK9BpnTOZzDh+fMhbb73D2OqCMMJYGjhRxHI+Iy0ZTNo90rUiy1mILMzY3Nxk2huztmZz985N/vRPfsDOtTUax+2rDdZftqBcLM8IoylHBx6pJBTKGs3mKxQ5wdrGGu12G0VMUCqB51uUKwVarXNqtRoiKc5PRxipbaazGY59zO71Gwz6YzRVpl6rMB1ekDJLnB6G2NEZXqCQzWpE/pxiViWTztBsjFCUmKk1plbfJBIV/MBmGcw42F8goOK4Pn4YUq7cxg1nhJHPcHTlBxv2HTLZLIE7RUvZWP4DXMvn/fe+xnQ6pZi9yXD2EkWRWI4NUslVdvaStHsHhMGSRusQ1VDImDn0lEZ7dMaNG28iKEl+8ukfokhLyuYdNDlD4Or0Bn1ir0gQQNY0sXoBXjClVC4Tp6v8vf/m/0bCFHnz7V36+xdcu65QnOzxYv8J925/E8ueYiajKzGoqWRSVVzNxveXZFI1vnj2kGTqJWJcpdOaMneHaCkVY2miiTlGkxGykOfdN79FYF/V0Q1Gcx487DANOnRCi+86E27VtqjVcwznLhI+jhMzXJxSq2RpnI9QkzBYzJhMZti2hplIM+z76KpIo3FKKq3jLnx++tEjytUU7caIlbLOeDzmonNE43xAedsknp4wVjJIShJdTGFZBcaLGD0pIQYygRcwHs7xIht3qpNOBBTXSrx4fUGprCNIMxazgISiYC8CynkZUXWp1+ugWfh2lkF7ScQc2V/DWAn52lfvcXj8HMEN6M97GCmD29s3uLy84N6dBBCT14scXnTIJEQ2dqp88nGDG9e2EUWL4cimmM8wGPYIQ59YcfHjOQk9jTsHOzohCEx213YZjUbY8wmmKeKFMwqFHPOlzmA8gljG8TxGo9FV2lYYIcYqvu9z784d0mmJSj2HHzqYRoqYDsQRkiiyvlLDUudcdpoUSxky6YDRaIbtOSQqCroUgKB86d2DKBSJWWLPEghFl1RSpzfMXokRN0QIfUIxxHNdJNlBlJdsbLzPaDZnYbtMhhGC6JDKJDETNcJAZjKzkJMCH3/6kJv31vkXf3C1eg0CD00zcOwF7UZIrbZCp/WadKqIgIJt26RSaVrnDpmCgKl7xNGC23sf8vz5UxQlw0XrEbVVg1rqTVrNM6JwgiIoqGKBzc0KnX6DyaJLjEc2XSUWQxTDIZZ8cukig0EbTakztZ4R6iLDbsyN2ysEjkhv7rN3o8ikd8lZq8n61i6nnZdkjTyXrdf4UcDcTxNLLtNFG1lYZTya47o+pWyCO7ffYTI9JZevIVNFFJe/mInJhM5K7RqNxinPXzzjg3d+l9miwScfP2R1NYuiG7SOznj/63dp987ZvJeilN5hp3qb45MzhgOLZw96IGrM/CWjzpJGY06/3yfwoy/f3wBFvfJtJpNJYim4qmAUZaIgJA4jZIGrLm8ifFdBvKo6R4g9RASi2CFGxPdiCDVkJSIWQmRZQpF1YvHKSxgLIoKvEkVLRv0RneYLguAqJHO1ZhfRk2WS6SSpbIZCqUgykaKyUUPVFGQZlnFEFAXYS5/5zGFpWfQ6U85PQoToCM1IEkURelJET2gsl0tyaR1JiRC+FIl+HKB+SThI6AlmCwvbslB0HWu8QDMTBFFIGIeIkghShKrLpIpJFpaHIEYsnBmNlkd8EWGaSVIpk3whiZlPkBY0Xnz+mpfPf8rr18/odRb4oUPgCRRzOn54hVWSFYUgDJFlBUmSkASBYOmRjFMEYZN33/kqrdNXTEcufugjSiK252A7q/zP/86/xz/8B38PTbtFLXsTJZnizo03GHcPePn5FyjGKqqSYukKdA+PCeKARHrJG7e/ytJu0+v0KeYr5PIRo/4AU1ghq+e57D1ntbqFrFSIooj2yQVvfmWbxvEzVm9c4961Tf7wX/9/0BIi+8+nVGvbjPsOmys7tPtnrNbuEAYvyeY1suZ9vvHV9zg8+SnPnz5jc2XOkxdP8b00U1UmDDNsb+4iKGNOTjpUaiukKmOePdrHc2zSqRSlQo3Xr/cpFCv0exb7+y9YW6+xvvomM2WJmS2SyxUIxIDJeIksZ1FrJp8feehJmZW0z3g2Z6kkMacztq5tcHbRZzn1CYQFAjrd7kt0MUUQx8iSxuXlBSsrNzGVDOv1PDDm+HDK3btfJ5VTGHRnDPsjipUknz38Kfn8OvXaHgeHz0hkK9y9sY47HoOj4tk5Pnz3bS4PTqmvVFjaPgolkgkPxAWSqFFdEfiLHz1mb+8GXmihx1mQHBZLj8lwxtjrMbciVisr7B9fsrJqEAsw7IoUKzKXlz2KxV1CP6a7vGDuupSyGY4a57z7lQ/xpXNOumdIaYVsKok1aPP0yYzd1R0Q+7TOfNYKu7S6A8rFPZaLJo43wFQ3caOYfCVBUnNonC8QfY/SRg67L7J0BkSCw3KuIksevXnIB/f2aD58znTuouSW6L5IQtPptk7QlAyhanDUtnjvO+9zdvIKXc8QBsFfvqDUpDzNdkC+oFGvbV41EnT3ubZbZjz0kUUBw1CwJgbDwZi1DYGEmabTaVEu1HG9CZIroKrmVWfnTKDTPcdcZGl1H5MsSaxX3uO88YQocPFdB2eQZG/vHpHUYDlbsLN7ncOjFrZlkfEmWMsZl+193nrrOnKwhuM4CPIJ/b7D7p5Ar5EmlRmjiAVk1SSXX7BY2HRaU8KyQuhblPK7SPUJ44lEt3+G69tIckjgLyiWK5ydjsnmqkxGE1YqawzHS4rlPOeXbY7aLzmzD7EjqFVFTKlEiMTCmTOdDLFmPorUZX1lE8QUi+CEVCbFZbNDvVzirXe3CTybXEGnddkjpSdZ3cxx+61fxrWvnrbFQGJ7dR2fESfnj1guQrK5FL6jsbG2jmmk6VzOUbQA0VeZTR0cd4EviORyCoPugmcvnzO8tIj1LrYMqhtjGmmUVptvfOs+ehTx9NUzMvkCceiQSYuMZ6AnWohagOPkkDUVWS0RLidX3amlLEsrz+HpC5KpDZxlhONGdFs2M80i
k8oTCROu75XYuZHne997gVopkcgEFJMaZ+0epcwW4+UFCTNFykwxHHQw5ynUBMTOHFXUmE0GhFGfMM6QTa0gCH2CyCeTzaLqSSYzHTeYUUyv0h0MSSgaK7Vb7F5b5/z8nKVjs716Ay9eIi990qaOLw6prVe4aD1l3jUYJ6fYwZSkbuDaSz785SrNgwHOMsRImQwHTRwnoJAv4UURQiggyx6SIJDNVEHyyaR0kuYGi7lHJlVH0UNevT7EsiwEISaRSDOcjJEUBQmBxWKKoWaJYpdXr56Tyecw9TJzq4nW6iGJ0pcnHiHz+YDEWorN+g7jyRnzic1iKiErPRTNI5HKELEgCBWIBGTVQNVsTo46XLu+webaPT759BA9K+BZHppgEOHhuEPW6jpxHCLKIqcXp9y+k2S+tGg3Ar724Q1mU4HxvEO+UuKi2aJ1dsIbtz4knUkShQKKol1511SVJ49OcV0HRZGuvIORhKJozKcuZnpBsbDKdCSQLU05Pv2CykqGi8tDMgWT09M+xo7Fta07DPsm+WQZI6EzW87wo01QBF4fPMXU1ynk16lXfTqXHqZi4rjnLJcBxHmE2CSRWvDi+StMLU8i7fP6oEupsoPtWRy9foFq2BR3tgjCDDtb11i6PS7OusSCh293KORWEcQu3dETvr73HWbzBnJcRZJDls7lv5mJWshoNEUUNNZWq3R7B8hylvpGgn5vwHDUp1LdotOU6QxnbO1eo9dOYQ/bPNv/nJ3tPd58Z49CrYKvT5AEF4QMx8fHFHNFXu+fIMUJjg+aTEYezcs+y0nIfH4VzEOU/w1cXJPRNBVdiREkiTCOiSIREeHqNFqW8ePgapUuB1ctOvFVwCYKIkQpJoo8iCWiOEI2TdRk8hcQdM9zkAWRwGszGcYMB3B0IBBFCqJ4hQ7SNA1Nz5LJZcnk0pipBNVaAW3buBK5kYAQXzXrTKcLppZNRMhlp00YSoiRB6JyZZv48pTS930UUeH05Ija+ipz28blatWfTCZRZJnA9XBdFy8OUVUJTTUQYxEJgUiKsT0Lf+pw0bxkMWjw4MFjfv1Xf42/9XfexfVWOHo2wrYnnBye89NPeiiKhut5CJKIqqp4nnfVGy+I+EKfRXfJTnWDbCFB1bqJ1TxHM1VAQFVl8imV73//J/zVX/0qn312iZnN0esf8OrUZCWfYuvaffrjJeVUASmaUqsmMM0kh68bLKWQ23ffwLV/Dn4dd9Hl8uKM995Yo3NxyJ17X2Huj9gorPDZy79gZe022UQNddflyRffwx18wNe++j5PD37KYrFg0s9z585dXh48wl9q9HsTNE2j3+/TbPw+o9ETPv3pOf/pf/rvMbMfMOlJpPMiQZhj6bc5P5xx/U4dRbXwPBFB9IgDh5QK+89ekU6ski8aTDoz7mzt4MgOZ8dnaMoqsSYiGCrTZZswDNEig6SZpDE5omO53CllCSYCpWqBWNC4PL1EM3Tu3qnz8kWPm/e/xuNHH7NRXSGXh8lIYzaZkkmZZDIKppbEyCocn0xJ59JYyzm98ZyVeonpUCKTXmMyn5FK65w1nhL4Inv5LGIIQxuKtTK//NYGk+kJHdfmXvo+mQRcts6R5BTZTJ7LRpuqEFEo5NA1E50Aa3nBYh5w5+abHJ1dYmoqyVzASfOSRELDW8LOzg4Hr08RRAUjAdPxhLW1DZZen875OUXlAxKyztPPXlCoyjRPWtzcfQNDdjE1gYk3Yb6osHmtgNWPmPcGDAdnzCOVhKayWIAgiUiSQLMxolrOEPopBGGCLybZWE3h+yMefPGEveu3eHLwBZFSYNu9D1qSQhJiU8FQshhqijgVMpyGrBlperMpzZmFIdQp5VOc9w7+8gVls3HCxmoNIdbxrYhM1qBUKpFIpBj2JsReTKaq0OxcghzhByr1tRLdvs/h+Wsq62/Q6ZyxWkryxWfHDPqv0Co+RadC4AUMOi6TzkvKNY1Q1mi3Rnz4jbeYjGdoRoTk69Tymzx3nuJ7S+YjncblGavrAs5AxA7PyGRyvPfOO7x49gxvZpHNgR8JDMcjKiWVIIiIQgGZFOVsFt9PMOyPyJV04nhKcSfk5GLGSmaXlJqm1eySzGqI4wrZVBl0CVFucXE0QEnabJY3WC7meKJA1Cxy1o8pVJYspmNUUaKWrlAqbuIGbWZ+RDa7gaYG2IkZC7sLokgsyjiuRXFFw12KHB1fshnX8L2IZLLE8dljViqbpHIbvPvuG3g2XDQO6Q+7rK1Xef6ySSyBkTPIZkyYDuiOxqiqxXSSQ5EkPv/s53z1a++i6Cs8e35CoVbAmpoYuoo1CyGrcW3nNoayhmWPOT37gmwqCe4W+bzK8dkD8toeseOSTKkcH80RIovRcAGxxOHrIRnDRFSiK9+jJjHonyIpAVnzOi9OzyhumkQL2FjfJJ3a4/nzPyTweiRyWVxnQLvVY27PSJcT9HoehXKNeq1M73LEVvUGHhb+zCGZyoOroYRzNlYrGLM5Z40JYTShXC0wnDlsXdujOW6TTHtEkk7jso8qL5lPQ+rbOgvBoX3eQIh93vnGFrIS8/SzCZWbKc7OLTRylKsKnYlFFFncWL9JqlKm3bxgGaq0+wMUzyCZSuPbNsVMmq41Ilw67G1s0u12KBY3WVrgOBIQMpu1EQSBZCpFGOZQVJ2pM8UKfWzb5+zsCH/eYq1WYnvusbCvQg6iKDJZjlnIEc2Bx2A4JJFykFNFgthDV3QMdc7IFlEQiOIIX1jgEnLv+g3efGuLRx8PkFST2BcQCAmjmFgIsRcOuZKEqEe0jh8znAZEwQq5tIyoejx+csbu9RtkkjqLcZ/WUR9NKXJ88RG6LjOzHFQ9RMDEMEROjxrIqnLVtY1NDIiiQRAMSWXWuXf3G/zRH/13GIk1TEPgxfPHqGoWw09gqgrzRZNr27d59OQz5s4DVmubBP6cbttCTzvsbO2RMHVOzp5iO3MWnsXBcRtNB99bUK1WCcIRjfMR9TWNKBxzcHhOLlsml6mztbLH93/wr9lev0tD+pRKqUa3f4pkSty6eZvPPvsJe3urdDonpBJlDN3jxx/9U/L5PCfd5+xsXcO3o1/MxGJmk5m15LJj8cH7f5uzzqeE8whDLpMwYgr5MlOrTzpXxieHZzmsra8gCGluKl8hCpdk8gYX5+eIqsvMOUMXqxSKJYJwwdtvr1JNrRP9cpJGe0Qo3CKlZviTH3yPdDKPHm3y6PkzXCtk2LYYz2yEUGLuTFCFFJqRRtYsFJL4gocpq4Q4SKJBEGogRISRfdU4FUkQ8eWDQAiCA3GSwL+yEkhCQBjqiLJGQkyAHOBKIiESCh5yHOF7HrPZGeORSxToBKGLKhpEAhhaHjOlki0myaRzJBIpauk0QiaDpq3yLPQ5Oj7AkIFYvHqgCmwEQUU3F7Rar3j59AlGQkWUQkAmlUyjm2kU1URSTHRDIlYNMlmBKDCIgxjbmTDsDxiN2iymNvZ8QDZd4b1vCqysGmQT32Gt/pTjgw6//Tu/ycv/4L+k0x2SMNP4YYDvuyiKTBhHKKpJKp3HztkcHu2zEt6
lXk9QnCZYWEuEWEZWBHKpLD8/a1Iqlbjz5m264yG3dn+Nh6ef8OQkzf13tjCyEZcNhWUoUNuuoQox1ZUcph7w/NkZkVBG1Rc4kwVff+cN6lsVZpHN2PbBEhiHM6JlSLGYo9F4QlLN87U3v8I/+u/+jN17OyDFZPUaCENef/GAzb23mdldZE2gddKmtHqPG3feYHB5wtZbDg/3f8JousRIyuiFDO2LFmvVLMmEw9gaUaqlWFhDJEehXDepROtcz+bpuxYJPcs0HuGjILkqlWKGg/0f8Z1v/ruM1XMsy2K0OGW5UMnk0ySWWTbKHoq8Q3ZdY+E0SBt1bGNOu3tM4F7n3bfrxJJHuVohNiK8MI+uxxRyWQbDMYloE4IWo7FBsWjiRyWiyGOlWMGMBcqlAloiZDrN0Lv0qK3ukjMkTNOk0WiSTGRZLJoEbgVFT3J77xr1/Aqz8Qndbh/bjyhm1qhWSgihTJjp0zg9Y54pks1WGY6/4M9/9Jid9TLSPMHEFjGzeSQvpLqqISgxYqxwc2OTRy8ekC6vMJp2cJcx99+5x6unn/PWvffx3Bhd9Hj71ruQczk7apNOvclqJWa5EBiNVBauT+12hS/+7GckkhHDQZLavdv4gUWjeYlMjUyuSpz1Efw1pv0mA8+nvGVSVtbZWN/BnrUZjYZ89LM/5L3b93BDn+Xco5CB0djFH+jkMgk0oU+ARORbFFIJlvM5gmT+5QvKTKqGqhRIpkzm8zYvXo4wkxL7Rz2cpcDaSh4jFTF61aO6KtFttrAmIaU1lcVkh6/c+Br/5Pkj2rFO5XqN3EaSe+/eYHDscnrxBNubEYRjnn/RYrV+g42V6wioiPIAe6kieCp/8fM/4q03v0K7dY4oiNy5fZeF5bN0bETdZRFMeXU4J1taZdjpUa6HpMQiGWOF4bRPp9elWE6ws7fKZDZBV1WyhSSSIpLMFhmNB6yt7WAKaTonFsmMxHg8RUhm0BI6R+efU6lmkRPJqzWREpEr5Xj66hmiNkRK5FnGEZWtG0RuRCFbptl8ydwZIoZLFsY5ncuI27euISsS1rxLOhshyBHl4k2Gk1OyhRWEWCeZ1BmNpmxtvMX29jb95oz2qUXIFNtd4As9Pvlkgp4xaLYGeK/7rGwUUUWVYjaHqsqcHZ9TKWXZu/Y+pdwNWu0+Ai1GfZ/JuIuqC7x6+RJVTVIvlrGdMV40xDBFFEXDccZIkwLl4gaLRYyZUJhaDptrVURJww/mVNczTHoC4OF6MxQUFlOLoahTL+zw/T97RnEromLK+L5JGCW5uLRwBA+ZCbNpFq2gUkMkryeYLpas5tNsbNbY2lhl34tIpxJIiQKvz56RKmSpF9dpvHjOy/0G6YrI6qrJxckSQwmorSb48YOfMA/HfPjuNmkh5q/93W3u3dDRpwle7p/y2XGMLRTJpEUySpJ55JHM5Vn6KbIFBWs8ZRFNyOXBiOvYkYu7bNEbjLH9JTE+Lj7t/gDP1xhaFrLgkTGTtHszhpMlQrJLJq8xOuhCqLL0A9LpNAmjzsIdcd44o76yRjV3m/PmAbavYKg+CD7D0YQYlViIrsSfpBN7aRz/EffuX+fyrIcrNlDlmxwfXuI6CmKkgeQiyRqiIGFbY1Zrde7cu8UP/vSnJM00lj/F0EUCO0CSRDRJZnu7Snd0CYqJR4dOr0e7HXD7Xo2T85foepo4dskXUyx9k43dTfqTF0jpBbKbwtRinIVHDASRfyVM4BeswjAMAZiPF3zy4M9IpjOIJGi0n5AvKfiuxNKa4XoBqurywx//OavVuzx89DMK2RW2Nt5Ckp4RYLP/6hBV7WMtHE7PjikWqty4uUK7c0L7MqBc3KI/esVlq0O2cJ/JtE02V2Bjc40nh19giKvkSmkiuYOu3WX/YJ+1jS2GvUNca4QqZzg5fUUcz0klyjhWCkSB6kqB8axH4Ptkiv8GbL7/4hJfXbL9rsO/+Pi/4PzY4xsfVMmnPyCaXnErC/lrNHufEggaaXODmWWxtObsH/2Y2zfeZmHb+FIfXRVRwwqSqEOkk82ZHB28wKkGhK6NHc0RZBF76bK7l2elfIsw1KjcKZE1S8RTAzHU6E47yFqa89cTnr86YT7TmQ0s3EVMfzFH0kyCaIyqiZhGEkGQEOUASVThS3ampukEoYLn20gYBOEUVc4QRjahoOFES3TJhMBFFgXwDXyCK7i+YKJrGeLIRzMUXFtGEH1cb8TckhmMB8TBMWEYIkogiqDrGpIskDBkYkEgCB0kWUOQRGRBxnNj0lmV3/hr21y7IbH/ooM3WeObv1qiP7ng8IVFpmhxfNwkEa7w0R8fIEoJPN+6ug59CRCJsckVqtgLiQc/m1CpZGifXTCbLrix9x6dwSkLe4mq/pv/sSiKV+imKLqaia5P92TA9dVbDJotZv4GdaVOwjgkJiSMIkqlIvr8JS8P97l9810kdYVY9tE8lcw1Ddcaks1nmFgtbl7f4vDgFCHOUC5FvD5oUVuvI4oSmi6Sz9aZTxyePHmGj4CRPWCjvsVsMOL2zV9GFpfsFd5CkEuMzl/zvT/7+4S5LKuZiPyWjGDkWHan/OzzTyjVJfy2hKKVWHZOOVt6JFZ0FE9i0nrF0LJ56842l+cnTAZDnMklZnYdQQm5PN8nUzQJ1DSeJSKkQwbTkLvvfJvmxTm1soJvWFhji37fZ2fnOhfNp6hy5iqhL6fJVwukEjHZ1HVevjhlufBw7Igbd+7x6WfP+eCDb1Bd+TmDTpNeP0GpVuf67k0+/vlD1lc07t9b5dMfXKKnDObeBb3OGSm9xt72Kh9/9IitaxssFxK+EHPW6PFb/853yCQEnrw4ZXNzk0lrQPtywdp6jZPzU7LaLYbjS9RlBrIT9o8+YWHZ7G4VuWh0MTdMhstLrImO47ZRFAkzIdEbjKiub5IpWuixjOAXSJohghlzeH5OYmoQRg6d9gmJhEWhtoJi6Mx7TVxbQBBSrG9s0u1NuH79BjJzzo4vGAzHjMYh1TWf88aAw1d9drfKlMp5ZsMiq6UbDEZzrl03abVfIwlFTLWIbmicXz6DWGG7tkp+Y4XLx0+Jzkrcu30NazjEUMsY2pCILGNniTWwcZc+07HEaDFCSVYgpXF+ZnE2crm9u8fYGbO2VoDxv72HUvy3/cV0LkUyVePp831GowWpTBrXBc9zWQYdnr/a5y9+/DO6/T7LmYnvzlk6HRpHAb1ejz/97n/Lu7/yNpKag8hBLOqcHQ85OXtBsaoTLxOIUcTt3TfJpA1qqwa+F7K+vkYyLZIsTKhtVGmNHpEviSSMCloCZC3Gjac4rsVs7nDZO+Ok8xFnnRd02nNaF3OWMwcI0QzQEjKyYpJI1BHkNIulQBgarFW2CBcyzsigcTZkshzgi0l68w5N6wUXg2f4vkejdYgVn6GnRGzXYTDrUcxnub2+RzSL0YIM047LoN2nXiijCCJFo8J8NqRez/P2Gx8giirD/gJnKdDvLzg9mXB6cUSvP6DbnTMaz+gOWjiuSzqTJ4hdOtMTJm6L568PCeIFaWOPREIhYcLmus713U1Et4
4YZDHFMqZaYaW+DeQo1tN89PPvc3L6isBN4nk+pqnTOXd5781fQQxz2MuIlcoqURCysEcMh0MiZlxcvkLXMiAKLLwBCSOJCCiqQOCLEEpsrlXZWi+xt32d9XqFvZVVLtsjOoMGm1tJIt+nltpEEARy6QzlRJaUmWLz1g6pvEE5n6ayXkXRV9m6neY3/1ad3KrPDz/9E9SSQ3EXGr0HbG1tkEoHzEcWkafxy7/0Aa4Ds/EYNW0g6Wm2t9ZIG2nu7XzAfBBiWTN6fYO/948+pTmboBhZUhWB7HoAusF5W6C39EmU8zx9/hmFsgSajLuMiQOP8WxIf9BkeNLD9nychc16soy4lBGVJG4Qctm6wPV9upMZR60OQ3vJs8N9Ti5PSOV0SmWNra1NUokyX3nvOxRyG/yNv/6bFPMlup0RoaNTz9ZYLezSOnEo5rKYuoYoxohSSDaZQxUUTKnAtBdy68YNfu3Xvk25bPDNb91E0fyrAIwYE4sScQy6mkAg5PD4mP4kIpMyUcQQSYwwDBUhiCiVZbKlJaIyYRYfkM1niOUZ1R2Fl4eviCWPRv8Rr0+f8ezoAXrG4tnLTxAildu3tnEtj9APEGMdQfz/HSVX34dhiCRJHB69YjLtMhosmEyXzOYOvnvlpxtNj9A0hSgwuWye8/GD79MftWh3W/zT/+Gfc3x6QetyzHTsIckGuXwaRfMZTzoM+lMCz6RYLGJNDFoNmXtv3KJxeYw198gX6nzx4idE4pKXBz/GW8hMhhEPnnzEdDmm2d3Hdx2GPYHta1WmsyFJs8ZkMmJlvUQYRjx9+oREQrhiU544v/hM50w2NvfQEmne+MaSN7+hkSrCwlmgqiquE+GFJximhpGASBozm1oMhk3q9XUSySztwRmJrEi6oON4S2L1Etno0em+plBM4sZzps6EcnEHe+oz646QAomTi+dcHAfI01V6Zz6XozFCIYftmKysbvJ3/8Pf5e/8R+/yn/xnX+U/+z98jf/6H/5v+N/+77/G7/zNCu99tcbKapIIm8VyynS2pNWaMBwtsKbyFbfR9VFEA02TSBp5YuGKXaioAoom4UcLIh+EwEHXPBRFAsVHFH3CcEHgx3i2ThjIBIGCIhvICiRMSKRECgWDbEYnm0kiS1de0TgWiKKrlihBiBFFmTAKUOQEi3nIb//u1zETVX79dz7gV/56mVv376LpSX75N26wtVviP/iP/wqpgsDEsvFDh1CI8IWYUBWIdYVYV9B1nShe8slH+9RqNfrTV7z13l06wxNS2RSuc0VW8MMr39i/6XQPkWWRb33tHikjSWswY2f3PnEYQOwjIhKEAUEUsnRD0lqdN++9y6tH+yRVnbPWCNPIMGz6OHbM4cElW1tbvHr1jBs3bvL+B++wv39KWk0Seza9zjmGmSJbuI6cUvD0Afl6ha3Vu5g5gdnS5vadN3AjAZQNGsPX/MH3/x/8n/6vf4v/+//lP+f+tfdpnLcYNId8/cO/Rj0pIAClWkQ+LBA6Pkfj56ixwrjTJJQybJQNOm2falpBEEMsIcYLT5lNRhRqK1ycn9FuhRRLa1jClEA3mdln3LxfJ19J02r2ccOATEnDXaYRVY/x7IJKaR3fMdneqjIeDjg7bvHmvXfJZiTq9TKj0YxKvcBgYDEZprhsTLi88JFEk073mGu3ba69/ZK5/4BRMEdNySiigIGKGeeJnJj3v/ptlixYzHUEwWfSn7L/fMR85qDKLq/3H9CfvcAPl/R746u1/+SIycQjDERsz8X1Jrw6fIruJ7mxleH1iz4RHs3uIa5n4Nkes0mLy4shJ6cW1iRNqzskXytybfc+K9UtTCnJZeclczfmzbd+FW8uEVgLFtYMJ4wJQpd2u0k2XaM1fs5njx4jKUm2r6+xs7PHN7/2FouxzWwQ8Ou/+TXmyyFje0CjP6Gwucr6ep35NMBeRJgJGdWMUFWV1eodlssl7bMxSB7FdJprWztgxoxnU+JIppa/RpxaMhm5bG+tEMtLBD/P9dU9TMVmPlMIHIetNQnfXrC6e4fhQuDl0SX/th//1ieUetri9OwRktHH9opsXi9ydhwgyQoLq0+uoOLHA6QUHBwNqOWv0+00WN8uEYsLquk9qvIegv6IceuUSJCYtAPqW1u0Fg22bxRZX/ufsljMGU9nhJHDMnxNJn+HpauyWC6RhCKZRJ58epUpPQ6aX6DqGqEM7tym2w5J5RPUK1VmcR/bCkgXHdKZFM8/fUS5niOOBeZLi06zjWle9f2OGh5e9ha+bbG2UgE3ia97zCcCxXKVs/MWL7+4wFQM1q4niWSV/mlIsbrBwckXbK5sIMsVdvbytIfPmE76ZNM5glghCGWSyRQ7GzcQ3AzD/gBR6+EFI4QoS716hzBacn7ewPcF4niEpotkMwUK5QqNzms+f94glayDsECQdBonIabpUczl6fZH6HoGy1IQRAElMSdYWuCmsO0YiPjz7/6Qvd0ypp6m2RqRz5jkkhvUK3VG/QGD0SGymGc6yxK5Vd7+ypt88fAFqpxH1+acn11iJBOE+IysJUJoIAgR5VyaducMy7TJJWTytQrdZp8b22XeKW9R1mtYYwuRBNPZGDGS8aUZ+92HFIppxFCgUpU42G9SKSZI52I8b8i4ncedJdlY2SWbjxiMGuSKEqPWOeV6gWQ14lvvfpNWc0i1mGUwDljaHmsbRR4/fMyNaxusVmt0Lk0CZcIPP9/HtvP8P//wAs8CUQ+IhYjrNxL0BgPc4ZLL9pjb128yG0M6XaaQqXN6fICRSOHYHgs7RM0YWI7NfBkSaSZ9a04QzslU8nQnE9JmFWcaEfFlf3Oc5tq9O0ReFy90WKnf4PXhYwgUnjw+ote/xEjG7OyuspxDFFhkcwKr9Rzuo9ZV7R8ihhIRug6V3BqhMGEx9fAcj9KKTTono2gq8TxEkQ1s17s68UEgjgIGAx8/1jHlkETCIAwC0okkvdaAjW0Fyx4hiiLpTI3O6YxiOYETjElkk0yHS2JfJBJVzpsddCVAtNOs3lEwb2T52b88RiBFKHqIgvALv50oXvEEhS9/JkoS85lDGOm4UUBzcISZyjOzQlTNw16INKwRitajVFhn2J1RruT44tknvHXv6xh6nqOjR2TzOcIAer0+CWON8XDE69fnvPXOHq9fNImiIwZtn52tVSJ/wr17d3jw8SNS2QKXwzFqbPDu/Ts8ev45ZlokU84yGzlsrmwTRwpiuIk9PeZg/pi33/4QP5xRqRSx3Qmx5HHv7tf46c/++BczsVzKMJ2PmJ+mSKV/mWriJ0RLl+FgTL2Sp3vZQFTHrKxtc3zcxpUtUpk0q2spVNax5z2yeYUnzw7Y2dhF11VmkwACF9cfEGkVisVtHKfJdAKr5XXidImXT/6IWr3E5ekxcvgu2xsaSzzWSmXOEs/47MknzOZ9LpqX7GxtUyzn+PnDH7K3m2V95z7++xGeP8W2DQTSHBy/JJ8r82L/BeOuw3Sk0x9MWcw9HMdCxCCMIxLJABAIlRBJLGAmQCIm8FQQbTTBIIgiFCXCDRzCKEZSIuI4JAplE
AIEUSUKAkKurg/LshBFEU03v+RISgiCTBj5CAgIwpWwc5c6Tx83Wd8NGXfACxyO9l+STqjY8xnr5V1W85u8ePoDVEUBUUIkgSxEiHJ81bUey0CEmYw5PHrJo0/brKyWePhwHy09w531kCQFP7i6L8iy/OXJpAJESLKAE/hsXdvg8ckD2pN1qrsS3Qcj3lAkvIV7hSVy+rxqNSisbLB3e4Pp+Dk/e/g5d+u3yaZciNO8/dY9fvjD7/POu7/Htz/8HTzvmBcvv6DXarO7+R7ZNYPnT57w/r3fwnVCFvMuMgatVou9tVU6kyb/xX/9v+La+iZffPKIcafHvfffYThs8v/+h/+E23vvUXQ6RNoqB1aXzY09Fp1jGod94nTItZVV5v0BorHGWt1A0W1Co05ZqxLZbZaeRGs0RNNXiIQ5zaaEkgjZqBkcHc9I5kKmiznVWpnm8IJuv8V4PObtd96n2Tomm9cgylOrGCzdPrf27vD5w8eMx1MqpS0k2SCIrwS4H0MUCpydH7FSL7Czs8vh4SGNizQT55R05ZJXz2POXl7y5nu/BgwwE+8gKBEvHx9jlm7geFPsaUBuxUCKxqRyPmbGR6bIRjXB6eUhq+t38MMl540zqrVNOoMjFGmHu/eu8/jhY3I5g2s7d6gXUnz69AvWdncRgzTlrEsk+JRLKs3DKYE3IpdbI3Q7VMu3Ec2Q7/305xSrMR5LYt/Em6qsvWnSPdPJl0ymzpiMZiJJXUbDLuv1GqlEGUmdYnkdGkdNZKNC+X6Z6DLF1vUSF5cNCvlVlos+xYrJ1uoWp/ZTUtkNDOM6F+dTarUsshLS707Y3LhG2OsRzbJsVZKctV/QGnjcvHObeWdMsZDG7xS5eT3HabPB3vV3MXQZazFFtEJkVcXIJvHjGCPh8rr7EZ2zmOu7G3/5gvLsvM/5eYe33trBmqgMhiMGgwGoPWrrOc5PxmSzeUwz5O0PVCQ3QzabZ+92ntBtceveHX7+F39OxIJ8fp18IsPOrQKjuMdm+hbHizGPXv4IgJ1rm8ymLhm1xvllA2scECJjGFNq+Tv4jsfSGWKoq4haAzFOk8/XWC3mOG8d0DzoUEisUsya+NGEg/PH3L19h0CI8b0Z1nyBIicQpRlB4FMp11GTAUX2mC377N3e5PjsBE0MefX6nN/6pV9la+MualCh3e/xvZ/8MWldxHNFRKHC+cUpjnhCo3vO6maVmady8eqQWPgTKjWR5cIhdsD1lkzmY8rqNfKZFMNRm4XdJZnIYehpTo8PWF9fRxIjNF3h4Pgzdq9vk058jecPL5FXfbIZlf2zKRPZJrtcYTJdMrIuKGVXGU+GbGp5UqaAbTtM+mOy2Sz//t/8j/nDP/n7xIWYXFHG9+YcHR3w7vvbXDTarNbqLOc+w9lzVjfeYP/VMTEum9cSfPTRPkmzQi6fZDSeEkYe1sxFRCRh2ISRRy61QuAMmVgz5vGS/nDE6u1reMMFYWBjOTOeniz4G197h+eff0TH5oql+fIVt2/tUM8kmS86ZNJriPYW/caM8bhLNruNvxximnmKieuQ7oICzUmbh0d9yukaO3sFRo8m3KlmGVtn5LMZhosenafHvHHn12l1J2TSZSoJAV8LsaMWQlBkORtzcPAKM1Wh3ZiTSGc4OJ5gkiZbnjP3JlRqW6TNLPNlEyeYY4/HpPU8jXkLxdCIrZBqPst+Y0w+B9ZiAoGIaYpUCtmrNC4BRibN5f6AN++vYuouYeTy05+12L1R46233uDTj07Yu1/EWXjISoh7dEgUh+iaRhiCqIkkzCzEKpXVHBcXFyDIyHESPxyQMJMMxQVxZCLLMQgRnudgJg18N0MszwniCFkyQFigmhJCFFOpK8RRmXE/IlEUuH69yvOnTxF1nUw2iSQukASZo8NzUsUcfhyzVilxejIikUiRzBssXRFREa8St8JVGAf4hbj8H7+GsUCn1yaMDErVAqPhlMn4yjZjJjZptJ6SyxZx7BgnWOKPNYJA4+PPfsy1rXcZT0d0B02Wy4j6ag6ENKI85Zd+6ZdoXD4HwcX2L3j7vVsMBk3WVuuUqgZvvr2H74mcXxzwra9+HVyNjY0UdhxTLtfIaBLO0iaZyEMs8Nbbd7Fcle31t3n85GNEbYCiKPSHCp8/+9dMZ4NfzMTFYoEgeWTSJVSxTE38dQ7bn7G1YTAaDjEyCm605PPHn2GoGar1G1jegNcvXnNzq4qR1jES66zUfeYzj72dG3QnHYKljmFmseYBybRHKllHigLGgwiWNr/5V36Px08PydYuaHSeoOXfQtYinn7+BEGyUPUsi8WSSnmd4ajPdChRKlUYj2xuXNvhsnnC0vLwHINf/eXfQlYkhsvXvP9hnmp2l2QW8Nfodi65PG8z6FsMh0suLxb4VsB0sWRqjVksQUQiZoSqKohhDtlwIZJJpVUCP0bA+HLlvUQkhRt5XwL7A2T5qoXJ80PiGMQvKxfDMAIhRhAjiAECwlDi4tjh2s0cgelgjyIUzaQ7PiSbrtAfX/LzTz7j9bMp6ayO60XEUXwlYmMFMY7QdR0/cFksHIIg4g//+c/4m//+Do4vc339OhetlyBe9aJH0dW6O4qiX6y9TdNk3G3ihhYb195DkjUeP/se72h7V3WsioyiyFhWwK07m0Txgma7jYjBr//mX+fi4xe8+8G3eXV2zNHBIXGQ5vj8AT/4P/8P/C/+9v+O3/ub/xH/4B/857zaP2FjN4GipDg9PyaZUNms3+Gy/RxTvIkViCSKFXZ2r5E3sozyHVrdHjfe/RqnRw0ET0Qxl9y7dZvDJ0+xfnbO+ld/DTedo3rDQk9m6Hdc3ljfQnHnrOyV+enjj3At6MkjzIJKJMq8c+d9Ju4lwdhEcoYka5uYhkwqHRDHMclEDk3O0+nsc3a+4M6928znM3Qtc8UBXfiM7SHLhU05f4vv/Mp3+OLZQxoXLc47KkHskM0bjKY2iiywec3g+RcvWd+oU6mUaDZb7Ny4gyLXOe8OqFZEGq1zfC9innvK6eEJN6/f5Pmrh1Rqa9Tq68ytEelSDo+IZy8e8saNt7Gc5VXByWhMvpRmZ2uL0TCkmN3B9x167QGBo7G9WaNvzfmzv/gzUsUKzX6X2BqwUl5HTS+YjxRWt3WEoUQul0dCYNie4TkZMiWXhTsnnSoReS6KrNDt7FMsSczmQ9KZIrNBm1rxDqXSaw4PXrF38xoBEp999pDt+i5aSuXZ0wOkWKG2Wme271Bf0Wi0F7TbC9LJDtmixv7zMbX6dXZ2q4wmU0ZDCyMpk8j6LOYp7FnMe29ucWM84Qdnhxz+/HOK9TTPjkW2V6tIUoHiSpY3bqT57ne/y9b9XydTyfPRTx+xfaNMTq3SH7dpLwYktdxVa9ZftqCUJJntayVsN6RcTUFQZHWlhCdGTEYWtbqJJtcQRI/xYEzonlMtXuPodQdFm/CH3/8jyrkElWIZx8rwyaMD7rxp8/y0x7tv3mM4vMRMCWjiJsN+hKqoeMsko+GERCKBpC6ZzWY86j7k2kaF0NcoZbcYzByKxn22qrd4+voPyWdk
Vleu4y6T9Ef7bF67TjIl4i9CjLTAYrzA9ZcgzUln03RbV8iHxWhAWs8hSzWmY5/mhYUqnrBSvcFskuXHjU9Zq+xxdHpEd9QkLKRxrCmj2ZTZ2EIQVcqFNdJ6itD06FohrbMJg9YcUzdZLEO+/eGHVGoew46F5zukzA3Gwylnx2csFhaCINC87CEoMY4roesrfPbzYyShi2EkOLtsEx7JBOESSUziuRdY9hxZSmPPfFzb4eK0RyKpYRohX33vPv/B3/1fc231G/zJH3/Gct7ASKQJXJvqisTBfoNicRVFcZEli/FEw3EuqNWSeG6WJ48vubZzm6k1wbJ9ppMhxVyKdDXPeDkjW04wGI0ZdwZcv7XCo+P9q/WrqtI971HMidy4W+bpS4+3dnL4QkyUKPDVNZNf3vPJ/Dvv8gefWYh6gfGpy8QS8aMZkuMhGCFH7Y9ZW9tgPjLIGC3ySp1QsRjZAWrUx+ot6E02KJYjeo0ZljWjnF9Bzqa4sPb56Rc/xBQjMmmDRisikdWZTSOm03PKK2mW3ia9ywtCIUYXdLLZLPVimURW4OLS5+zihHyhihcOiYmxZw6SGZFNZ7CsOSk9T6lYxA5DphMLXZWJEcnIWTYrNbxogjVbIMVVbt1MEdKn3ZmQTBS4d7/G3t4uz54esLtnoCcMomDKfD5moyCjfpmU1nWFTqNL+Zc2yKSrDIYt0lkJMEgYRUS5g7UcEgsaQgyKJBADqqHSHU6YudKXSBgfIoFYkNC1FFF0wrXdGs9fnzJfLLGxsRUbI5FC0iQS6QBBCOgPR1RLVWQ9JghNGsNX2OcaX//mfUo7CV49GZJPSPiOQCwKv7gB/4+iEq5OLCNidENiaQecXxxBLJFMpxn0W/jeFMNYYbY4vhIRUZqFfbWWOjvusbQeoOgucRyjqEl64w7JREiplqXRukDVEuQKJkn9JvlsDdcfUizqtJoXVMrbhKFAqAwYLA9YDk3sYMzG9iaj1phUKmQ2n6GqedqDxyQSCYQox8XlIW/d/w7P9v+MIL4gCJbYiyR7N2/8Yib2+11CBCJpweXwAE3R8N0M66tvM7M+ZRF2UI1t9m4nse0hC8dBiHNs1t/jK+/d4A/+5T9mdesG2QJM+gHdS4vMisA8nqErdeT8hPPmX1Au7LBaXaPfaSDOLR58so6gGaytXsN3noHU4nJ4wsLJkkzZ5LJJBF9E1/OYmQmxK5DKl5hN+kxnNrqexzDTdHpDXrx+zM7OTU5+8oy763fpdaY02hNSyYD5pEN9Lc36Rp18qcjQOkKKbKKoiGGE/PDPXiJSYWr18e00g1aLxSJDq91GkiSCwMTzrKvaT1MjCCMUU0YQBGRZxPd9NF1GVsD1LFRNJgyuxJyqcYXpihQUFRAXiBJIusy47WMHS8LxmErlJumMihfMCF4mCYUQUVURowgIECWBKLj6m7qhEMdgmgaGqXB60kSSrtOffs7JWYw1E3FsF13XgSv8kSBIX67iI9LJBMlSnWD0Gs1RmDinjDowS44JQucKFi4oJLMZFqJAOVFk5gu8PjtF7RW4/+ZbvDw4xA9njN0e2WqFhBlRWfX54x/8Pl996/f4W3/j/8h3f/L72DOd67vXePDoe5TzK2hGhBeWuLai8aD5B1c2pOUdFoZFuaLSsg45+ecz7t/4KqIw5J//0Sdsb9/hP/mr93nyTKFUKPD9k884OrZI6hppRYNOzPa1a0zcEfn0GlrBod/qkC/epVIs8+jTB6TzBd558zpPH76m2z1nOQpJ61lKpTSDyYzji3MSZshaOc1sGuE7NlHsXYV0k3kce4IgSBydPObJM5uVtTrJlMbSHiPJMa/2nxJEAnu7bxDGUyIW2LZFqbSFqs1Ja3WaTZnJsIfjjVm9do+bt65xcbbP9Z1bmFqKej3ixf4Ztc00iujRaGncvf917GmDbvs1cnqNIEyQlj167REbG7ch16XZaVDKb9HtdtnZS9EfTgjEJLYaUCwXyBRVRqcBJxf73H/7W8iVJb32GYpeYuFMqWSrZIpdRv0LDEVko/JLOM4R/UGKte0ya7V1/vC//WckshukV0wCJ4th6CjyXWbKASpJwtihWqkzGk1YzRWJXY9yYQV8UKUyqYxG0L6gslomW8xj9UXu3dvCcQXKqwJjZ0T/soMsyxDfpFoPaS4umZlVlEmNXOKA9Np1cisynlBib/ct9HCAktjAnR+Ty1TxFkN8d0q9ViAhewSWjS5IqEGaYm6Dhf3/B2zQte27pLMK//0/+YfcunGNuzdWabVfMrVGFAt1xsM2vfBjKrUig57HzvptLGvBdLJgfX2TSkkmUTKRfInxYEouCLicHLGTTzPqNsGPiR0DMX1Kv+MhkqZUscnnMoTREt8XibAQNR9BSjEadzEijWp2j3xGxnIPKdXKZLK3GM3bBLLFux98DWsq0m4eYrlN1nPrFAplAneBJOWZjxeUSmUWXo/J0ELNhcjxOoLhUapWECKRIPb52aN/RfNyjuP8OaI0J5PVOX3aIpcukVTTzOc+guwzmsVYVhYv8CgWt+h0J+i6SRRKaLrED378AN8F3/HY2MwyGbXZ3avh+yGRrDKex0hxiuVkiu/3iEKJRCKJoKq0BhM0JcFw2EdTIxRBIPRdEgkRz1KYhxeUV5IYRgHHG9MbTaiXvoKmqbw8/mM+/JUtukMYDEbYSxtF07EsD1UrEAgXGFmLWNTpDwcEYZKlJeE6EW7QZjydoEoOd27doHHRIRTmSKZLbxiiyGl+6dsfcHD6HJkYJZZI51XSpshkHhB2QnTFxHYChr2QkhGzxnO29Qrf/ckl3fEmb3/92xwd/mN8v0kikaZ15iHIIYgyp6/7bO5ssrCX5NIx/cEQ0Va4d+sDjk5OmC4sNlMVtNWYdjvGcy2m/SkpPc3B81NyRhE9ZbOQhvQaKvlsif/Z336Xn378Me2zCZV6CUnK40UTYkJG9oSz1oxcqoBqwnHrHDnwsbwII1+g1WuTjZNInk5gSrxud0jpAcmEiaaKCAkFN1gSigGJlEyvP6a+dh3HWfL8WZ+lfYHtDlhf2+XlsxbzWZ+bN9/kwUfPMdMxq9X7ZIYNNO0IVbk6UanWMoSKy2D2HB+f1epXKdcMXhw8ICmskM8v6fVt5ESM54VImo5q6uwfXrC1mf0y2GARuBJ61mQxD4gji8uLE2TDIZl1Wfghi2COoGawFzbJvI8viihmkkJWwRnqaFKWUjXLJ5894vHTJ1Sr65w8cxFDB0H4kksZgS5f1eZd+ScFZEXk4mTAcBCRTqdpt4/JpPMMujPSqTwz5xIEG1FIMh6PSZohnr9AU1O89ebXaHePGY5npDIpXG+ELpoMxxNy+RssnCmyHmNHTTJGmWUwoFBNkkqB6hicnh1Tr+5SylU4OHzO1naRjFTh7KjBG3e/yWj+DNcPqa2rPHr4jPE4Qy5XwAtG/Os//adcu15jtkiRzybImBkkSfjFTMzk0hyev6bda/PeO99GFkQSgcJHD79PLpci9j1mA5HVe2m6DR0rOEFTTFZWdjg8eMW1jTeQUgLWFG7c3EInw9H
FJbmihDMPGI/b+LaCY12tbwXTpbSxBs4ak+4FnYsm5coag8UjrLFHIZMhkVnlxcuX/Id/+/f4x//kn1LfCCnn6oymFp4DRycdKnWDxXzMq9evkNUUy+iSN99b5/mzE0xdpVgLaDVeIUVJnr/Y5+7ebzGbLhkMQ0L5lHKqRlq6xfv3I/KrCo/359y/8z6lZIXpfMKDz39MbWWFTz8+R1Elzk/7DDtLfDtgPArwPO/KyxhFzMcOiaSBJOsQCiiSSCD4CIJMHAeoypXANAyFTz95wLf/yq8RBwPqpTzV0i0ajXOa4wm1VYOPf3iKYUS4S//qhFGKEGIZohhZVpBlmdnURpADZFnk7KTL8QuVD77yLfqDES/3H+K7AbrOL07a4SqQI8QQE2K5h7SbU3JlEz902Luepf95hziOUHWVwHeQpQjkFLFkcT44YOPmCqVcipODhwhpA91UiUKV/bMX3Ln2BqX8DfqTc/5ff/9/ye1bv4GWHHG0f8zX3v+rfOfbv8aLFx/hLXNsrq7xuPkFKaVKOjTJ5HLAkvHrDnvx1xA3JObLc7JGntv1NT54Y5U/+ocvWd54h7uTGdbpjGx+zFbhHWJvxmi4wHKPePU6xXw6or6bw3JdRNekuJ7hvfff5Gff+y7TtRKJqs4e73PWeUYQCcw6AkIc88WTH+E4Hb71/l/lcnzB8eFzUvpNbt3TkSUbQgHPX2AkNXLFBOPhhPffu0+3O+DV6wckU2lSyTIPPv8JmqZRrW5jLfpsZ3VarTZRcEi5XCaT+iYrq3n6zSnTXpNW02Zjx2S4cHCWIRu1CvV8lhcvT7hzu8Zs2cd1bRJGmqXdoDfo0x0X2bt7Eztss3TmxKGBrqWp3UpwcnKBgAqCR7pYIZHOoYoGsrlPzlR48vxHpLMpRv0R6yt1iqUKk6nH0lHY2Mixv99FiI5JFfK88ZbBJz//mNb+Fr/+27/Hn/7ohxjJAkVP4uXhE2SyvPfGN+mMvuCi41HJl1jbrdAcd/BjiWw1wWePPseZFzDVCoVUBaIeZy9neP4IUVpjOoNELgOBRLWSYjGHYa/PpNekaXU57p1DmGZ1U2VstXlTfI9iMubk85+gazYT+yVOKmIkxiQnA+IY1qqb2H6fQslksZToDubEiwmHlxP4nb9kQdlsNhgN89zafRMCjecvHnPz9gY//lEXPwWr9Q0KtXW6wwGZnEokLcnnV5AEk0w6gaa4XLZ6tHqv0XMq+WrIxMpTzuc4aUwpZhU8T0XydUytj6xApbiGNVnSHfRI5bJE4QLfUxgPPSQ5Ymdzg/1nfRaTEarp4YsilycXRILCchITxTqGotI4u6S8XubjnzVY25GwFl0qhVtIqkKmFDM6jRB0BctxWc4fE8k61VqZxXRMv9+gnC+gp1QkPcAelon8JKNxk/G4z8zxMVMZ5vMlRkrh9LiDkfTx7Bm6ISDqKt22RSoOGUxnFLMrhKHPi/1TTDXN82eHFIopoihDqaoyHMzRTAVR1tBNneOTIxJaGkU3yK6CokVMhwIbWwaqqePYMYg2speiO1iyseswHEpksmleXPyM//Lv2Tx78gmb10wqlWsc7ne5dfsGjz8/4ubtdXqjJ7heSDadJhD6KJpKq9skX8iSS62zcIbY1pzaWgpFyhIyorIl4zoqSrSJn7L5+OFP6XQ6oKQpmik6zQVaZZfOoEk60aGQVmk6Nu/uTfEvRnSaDn8ezfjsIKYbOfzoRz8iCAWiUGE6Ctm4ptPtTFCkVRZuh3ZjgpGoIis97t1Zw/STdC6nFJIr9OIOi9EqrnvKwjLp986w4hHJqEpCE9DUkKdfjNGMJGHkEaFx+nqJsKxTrXscHIzJF0VGkyaxqGIqIsWUjC9PidUl2ZRMIVOk0e+jhCJaPYszFRDCgMidkitfZzrsUSyqmAmVbn+MmUnRn1nkVBklYXB6+ZLtjXXWN/Y4OzVZLNq4fh8/7qKoMksL/trf+FUuLvocnLwibsyRZZMgiAAJ1IBs5hqvjn6IoiyRZY0f/PBfkUyXQW4QBD4IGnHsIAkqcQCyqjMeLyjmHVxfBDFGFUSEKKTd6/PO23fYu6by0yc/plpZ4eXJCUEIqaRN0pCYL0Q8wSMQJ1y2ZdYLORKpgPtvvstseIkoJPj617/Kp9/9fRQhgSeEEItEUfiLmRFFAZIkIYpXXdSzicV0ZBMtEyjpNPVqivPzcxzPp1DWCZwE48EMghhrGkDgMxtecv3GdZbHHoaaY2o3sBYu2ULAy1fPeeudGwgY1FZruE6TV69esb6VRdEziEIRQZ4wdy7wlgEr9TpJ7RaKuqBa15lbArN5go3dDU5PGpTraVRJpz84ZGalefOtW0RxQBSXqFdrCFGBs8bhL15fbaXKb//6N/nv/+D3+eyTn/Htr/8Oc7uNJA1Zq3yDl69sCmmT5UhgMn+GKtaZW1NMuUlaMxlOmkhKk3TmLQx5l1JOYzSZY8+XFPIVDENHjDLM53Mcb4QoKVhnLoUVCUFTkMhwfDmm33T49gd/hUXU5ux8yMb2XfaPHyIkzjk8CWkaHVLlAo5l0ur9Kfm1X2Vt4x6TuUd/3CQry+w//wIhXEdPR5RKZT7/uMd6PYEQ2wiSg6wpjKanmKkbBLHH5dkxhpihfX5JJmvweP8HvLn7NS7Oelzfu0UoLnnj3Q3yhTTvu0viaMlq7Rbf/+6/RFUSnB73IEwyn8cMeguGowVLy0M301c4o0C9Wj3HHqKoIAoavgujAUTeKqsrm1w2+qTSWY6fvyKUErS7FvlCEt9LAiGRwNXaXXAQZAgF4UsSRQpJhDByefjZK77+nV/n8eOHJNMCsiwjcCUor1bfMUTxL/ye6VwFST3gWeMRebNGMbXGlEPiSCaOIsLQZ7ZYEBYjzk/a7G3tMpzbPHr8F2wUtvBsge5UpLKa47T5jM9f/4R6qc5qZY233r2HKjU4vzxG0xWCeMDnj05xXI1kOoFeKLAp3LpC8hWr2IMz9Ow6k4SI6RhEskdJTNA9GWDmQ/6r7/4Bd+98h51tgY8/fk25tsU/+fEB/vaE7Zs6HgrLvoXXuUQO+vjzMaqS5eGzB/xK5bfRxBXWN7foTZqglWlN9tEkE8lIIPsCupfm5madV2czLs5P2Nq6Se7dMkEQUSuXePnyFZ6/IAhtwtBlOBmzW72DFEksRhF7O7f45MFnbO+k6Q9P8TyFr35lndlsxNlJ+8u0fcx85pDNXuF+nMWApTeiWhWRBeiPzlmr7aCpSQajLt/+5rc4f/mck5MLEqs6nt2iltsmbZYQtmNkaU4iUSKbXSWRbDAYnFNevUUiWaLVPkeOm9Tru0TenFarQzq1ynh2ymI6oZCtkTQmTPpjNDVFrNnUizcp5Ets71o8fX5ChTX6wz7L2MKaXNA4h7VamWx6BTmbhPSYk5cLzk4b7Ozdpjv9lPPLF1Ty26hqlkGzTTVfRIohFNocngx474OvcNF+hoDIWn2Tk6MLdq5vcX7SIIwjXN9hPAy4cz9Jo+nx9ZU3OXn2iqhmkNE3MeMl/nJKfxYg5VL0Axlr2mF4ZrO1tcL56Q
VS0WTaHnD31hbHL/tUKlV+41e/wtnLj7l169a/rUz8t095r1Q20aUCb9x/H9NM8s6bv0yzMaJeLZM0FSRRJ/RivFGFlFrFsWzcMKYzGHPabOB6ZaqrK5xcTFkuVcq5t9mtbOK4JgklxNQypLMukhIwHllkEglOjp8ynA2wgxSnzWPmIxGNq47TrbUVLg6buM4l1VoRkQTWQkDRciynDiklTeiFLG2fciWHovQRRYtIUkjIBRr9YzrTc3702c+R1TnL8ZyJvWQq9XGkc5bOgmarSyaToVjIYV/4iNMEorfEGy+4vlIho1RZX9PQxSymXmBpxaSSOnEkYiQVPB9mE5diLotju0hikZklctnrM1kGLEOVy67LaCpzdjxAlDTMZAZRTGIt59juHEHTmDJjMhtjzRTKxU1KtRyLxRyRq3q2Yj4FsgCCzMGLLrPxBFFQWV/ZwvPHrG3oVIvbzMcOv/c/+RvMBh53b+2QUJMkFYl3rv8qurBJXrtOWkkz7gRYC5P98xaBqpGtlWgNRzSHZ6TSGpOORWx7LManjAcXzJdz0jWdlapErValkE8yXXSQJRD1Tc6GoEoC+VDG7nbojg1enPu8Pu3RnV6yf3hAtpTDKKyQScpMp1N8ISSd0ZhbArFgkcJm0e5w8LzDwlXYuLaB7MJ2apXhrIfnZxEiG1F0wDYZWkuQDWRDYtS1CGOYjG1arUt++L1XHJye0+m6+H5IsIwJfAd3biPLKm4kMuh55NUqwTJAk5PElkpSv0pfZ0oGakUjXdogJkRJRsyCOZ1Jj1hSkLUsxxcTnr0+YepGHDV7tNttNFkn+eW6TZRCMpk6qVQJaznn/GQfe9nl9s06+UwR4hhN1/B9yGVLHJ98im9n0dV1Hjz5GdlSgdF4TDKTxHL7yGJMFBvEokwUBcTRklhSWHo+nr8gjGIm9pgoEHFnCwp1n8vLCd5SQdJlwhgib0Y095BcD6dvIVngWRH5Wpn62jV6c5vm+AUfvPUO19e3KdazuDL4gYCqKEiygCDGhOFVQlcUVaLoCiEUxhb1ep1Wq8vdm9/h/0vbnwXZlqbnediz5rX2PO/cOY8n88xDTaequrq7egLQAEHCECyQhEmTEi3bCodtORRy2LKvFHZYEZLCNmmHJcumZDIEmqIwkWgADTTQ3dU115mHzDw5T3se19prHnyRhYIdDkfgAt4Rf+S6yIx9s/L/3//7vvd5/42/8+/T7JySqAmRqJHPLCHrMeNozNgKqM7PEKVc1KxKs9OnPlvB84ZU0jO4ps3ZkQ2ChCHV6bV3iEODQc8lk5aY9AP6/YBETLj/xi+g6xqptESrNeaDD3+bZ08fYrsJXtiiaGh0Lh6SkQSEKKHVmnLrxjtoWkAxewU/CBiOTxmMhuzu7KPr+lfr9GzAWfOM97/982CYfPzwIwpaiVwmi2mPKJauICkwMs+4e/s2kmhz98b7rKy/RnMyIFfPoAsrDE5tFDHF/vE2mbSAFGkIiUKrvYekRmj5Cc/3f4ztaiytX+Ho1e+wSIf3N97gN77/N/j5t/4e7dYxS5U32FqpsmDEmOYpfpzQKKRxehbDlsfR4T71ylVODy949OlDQtthZWkJaxIxHaZxgmOILnj8tMOtjfdoaFXKaolp4NC7eEil1ED0QE8qRIlEoHmMXB/HGxEEQ3w5JBF0ZCnLH/3gYyZth5ePTjhtnXGy7xEmOm+/X+Ctb73Lv/lv/33eeX+F/+n/6j3+o3/0b/Hv/Hvf5W//929QyV3OK4bClCQSkRKFJBaQdBfHBlUt8M4793CnA1x3wKuDc26/ucbu9hTHDpHUPEgJSCKqJqKoMoKgYOhgm/alaUyI8D2bVFbm409e8Ec/+hm+bKGnVgjjAL50dkdRRJIIiLKA78HcfJlu8xAvylFIZSiWqvTNMQvVRTTNwDJtEsD3m5zvPwADWlMP+/AT9NM9TAeqixVm5j1OTp+yNL/J7Y1vcvPG61hBB9M6w/J97t3+PltXXsN2e2xuLqBqHhenO5wcHJPW5+h323QGF4zDMqZpUlRTPH7+OROzS+jbbFy/iiwUyIgGqxs1uid90hkYjFt865tXCeggSJBRIrykzts/9zW2Xt8iElXSlYBSxeAPfvBfMA1baIUqO7tdBM/FH1sUMyUaN2YYqT5JTaZr7pNJa2SrOvu7n4AlctLc5uHjI25c3cAdjQldgbnZZYQg4OB0h50jiziwqEgN7t/7Ht7UYGt+i2IqYTKZXCY2yQGaEqMJAnIyIPJs8kURZ9qipKVpFOdQ1Rlu3X0dQ1KJ3ITZjSV2Dl6RKs9y9eZNaoVFWi2RaaQT+BOivo87sXn1cIfm2Svap03q5TxffPwMe9Ljar5GMClSKNVxA5d284x+8xQSDS9WKRaLVMoGimKQTefJyhpEUx4++5ipo3LlyjXKeZCiFMWMQiETY4Uj9i9e8bOP/oyfffoxQiSRz6VIVBFFL1PO1biyscXuwYfYgxPsqMfDB/tszNd57eoN3rh/lQdPfkpGM8iqBRqb+xjlR1jWHhNzn1p5Fj/yyVcVgqBKQV+hbwfUb2+yMrdOo9GgUKxenqVOj2cvnmMNHWQjg6aqnJ+f4vsuupVAogA5IiPidHjKJ4+e0I1LPOv0/+oFpTlMaLW3efTit0mkNk+ePmUwGiHrCelMDdeWLlV7ElIuzCAJOcJgQrEoIErw0ZOf8OD5F9y8c5Ura7doHjuc7DU5PHiMIkbsH+2SMdYwxxmWlpYoFivMzjSYry8Sey4aea5urlymk5QLHB41KZQKlMsL9PsO6WKDvcMvEOI01nTIzvE2n798ykm3RaFeRquG1Bdz4FXpTWwCLWSaWKxsVGmfBzgq9MM+niBzdDZm/+CIqxuvc3RwyOzSBb/6Gxv4vo3nyDQWZ3E8mE5NfCtFtz+i1+uQ1jU836VSniWJNFwnIiGi1zshDmQ0RQdhgoSGN5Ug9kiikMm4Q4TH8UEfc+BjmSaKnAZRQJMllKhMrV5k6vQQFR8jrZEIEuOJz/lFn/NmD02L0FSBtJFCIKCYnWc0CDk43kbTigzGHQaDEc9efMbrb8+ysbGGbbvcuf0Wd26+T33ORpJjJFHjyuY6oeciiT7d5gXjfg838Bj2xgxaNhm5TE5ZZTKx8AUXKZXi1tXr3L33DlPXQZMy3LvxPSRBpzyTYqZQBTPPZ4c+Z1KZvhRxZKbpaTPs9boIgYQ1Fun1OhipIrO1G0RBionZo1QSEQWVtnVBKAWcd1o8fP6Cz5+fka7W6HYmiLaMIipkjRS6WkTQFVwCxmOPo/NDGkt5BCUklSqhpwTieEqjtIYq6JAomN6ITH6RbKnGZGpjuxGTscPO7jFJmGLUd7GmMPVtHM9mOpWJApUwGHNx/grfigmdkIyeJaeXmQx6zNYKCJHK9rMjphOT44s+v//DH/HJZ59TzM/QaluMJz5jy2Qw6fP8cMrBxZjRoIqsSvhBgGU6qKrA4FggSbKkMyKdlksup5IkCVEcEPsFVLGBKP05+/GysoJ0eTD7gYvnO1iWSxB4OI6N6RwytJ6B2GJrfRU5C
VhZULj/xk2WFlbxfBnT6ZOEKnOVWaKpQycyqS02ePD5mIEt0Bp0OdlpcmV+lsB3iaLoyxmyy8NYkqSvnkVRJLBlWic+/87/6D/k7/39f4v/5B/+b7A9G1FMyGQiTKeDqmlUqmkKxTTmuE86m1CtFQgSk0xBJRYlhqZNOlNjZq6CIEc8efkJUrzO1IS0vkCtvE4hXyVjzNMftXi+/Zy9o22K1TxXl1/jO9/9OTqTc4rZCuVKlvxskULmCk4yxYm7lEolxm1YKr/Pq1dPEeMsYlKm7XRQ8ypxYn+1lhcy/PgP/4DWjs2vff/voak9VhbvktHzfP7px0ThhDi20KQ83abNYHzA/vGnnJ3tUSzMcHI8QKKKgEIU+sS+RG80xE7OaPaeomlZLGuKgMfVjdeo5hcQ1RHf3lqhFHVo/vg/Y+7it3lv6SVvlBK2hId8eyUimLi0Oh7lShZBXqPUKEA45NbGfd67/99mYg45PPuCQkXGiyak9DJLizNEjsInH1h4oxDHbYEE6WzEzEyPnmtRV1ao5FbZO9ihMz1mr/0pkqyRjl8jm6wyPGwyHR6hSDk2b8xSqkXkFwWurzcQ9CKh2SMKa/T2xhzsNYkiONkeY09EsnqGe28ZIHtEoYSAgiQnBIlHlIAipxhPeojJJUez1xswP7NErZxheeYqjz/qIYsKvu8Thj6u6zO1PAQhQlVlZClNELpoao44EhGSDCmtQLc9ZtTTWJq/SrdtI4pfBmDEfFldh4QYhJhMNsXZaQtr4lLKLHJ6+ArPcpkMA1zHwTAyiILKldLbXHvvbZrdAcNWh2/+d/7n/Jv/43+PrHOEa5ocH32MOwn59V/+X/Mbv/4/4eEXu3S7HVSljKrKvNp/gmVN8WyFne0DpvaYxmKKQtXi2as/pVDJksQqiubwfOcRulpkcf4qpjVk9+SETr+P43iszC8zPG4j+BrvvvcdyoUa46aPLmkc7h8xdabE+h5/8MkfsN93UKMCulpmaXmW9fUrTCYhb91/h/XNRXwsMvksZ+19zIHJ1BwxnUKhkkbRFM47Z7QmZ5x620i6SK4aMDR17t69Sy5OOD/cJtIqzC5tESUtxpHAB9v7nHQvWF5eJt+Y48JzGQ1OcawRu68eMhqNcD2TXt/k6PgAzxW5tXAPLcoRTiPEMKKUqWHZPYKkRfvsiI2NBSxvQrG2xsLKMtev3kCXJTrnGrnqHFM3YuvmMqEZsLT1BnYUkC7P8O7XlqF0wutv1Dk+e8DJ+Yir95ZZv1Pnys1VMkURx/GYDkOmUwHTNTGMOQ5f7VHKiTgmzMzW2Ttp4XpjoiBFrIbsHG9z0drDD7pYQZuzfgfT87h79ybPn3+APUlTrs+xcWsLvTDPN77+bfrmIefnGqqu4k49rm+t4JshK6vw6vkLommafDaL7/ogjVGkOuXyHJY9pjZbBMGg306QJJl0Oo0oZXn67BXdwYRyPo8QO/jelFJVxHNUFDlDKpuhUCrx0Scf0uycEYY6hCUk0cAx/b96QTk0dynkK2Tk60jxPJ4bYzsxtqugpSqoUo3dlx1q9SK5QpZSqUan3/uKgaSqNpGtYg1NXNOj1X5BuuKRK80ytHVS2SKTqUU651GfrXFx3iKJY0LPoZgqktE1nj/dZmf7mLPmNoeHDlPHZWI5vNo/5ORoj6X6a0wHY3LZNPXlEnotg6sHPD56zm/9zjN6oxFu8pSVKyqRFXP8bMSzR0c8PzknEmwkWWTQDQm9OlGS5ri7i6hl+PGfevzoRw84eOGRzZe4GO5hiq8QtCGNxZh8WUQSRSYTC1WVGU/65As6jdkKIhKRd4nV7Xf6hJ4BQkI+92UebxKiqClEMUMUReQLKdJpHUWS0eQ01XIDVRBIkhBZiTk53Wc86eOHIY4bkUqlCYIAZyqiCDrOdIIiw87LXY4Oz2m1xxwdm/T7EaFg0u7tkjEWePzZOUvLDexpyD//vf+AduuQXmuM64+Zb8wy6rsknkBOmaWUyaGHBoacATni1d6IzuScbL5BuVymWmwwbIVMpyArOqtL11hbuk4YR7x4/in2sM/K4gbNgc04ydCKKnxx4hKoU4qZNUYTif5oH0HqMzu/wHgYMDNTo1xcRPBnmYxCRq5NJI8ZDRwmox6KLNIbjDjo7uAZHg+efkp/0qM/kBFFnTBKGA5HBLaMKueZjgfEicOrvRa1RoFiXqdcMMjlYjJ5GavXZthskhKzxL7IaDxmOPUxA4G2OUbTHFwLfE/CNE18L8H2PFBslLRAKqWzv3tKEstEjLA9E0VOkS+k0fUU7YlLZzAkm82z9+qcTttF0vPImk5v0CWXK+LboCkCSWwQhBGCnGBaU84HRzzf2aPZdrHdCeNhgGuJ3Lj2Gl98coxtg/DnIlK8NMdczvolhGGA57k4jgmJQfuixfrmDNc23mX3RZvmSZe8UaCSyhFPNQSxjyCJrKwsMb9YQiJADSWOdp7hWoesr6bZP3zK7GKWQt2n2FCJEhVRkC8DpBPxK2Ebx5epMqIo4noT/s7f/h/yjW98k//Zv/sPcP0e6VQO3xOZWiF9d4SWzlPKzNMoSrx1ZwVD1ehNd4kyfVqtEY35KnLKwvFNVMOiWJbJ5GAaNJmbXaZWrjG1QhA1kHyGkxFn7WcUc4tMhj4Xwz2arWNeu/MGnXGTTx7+mNZFh9PeEbatc9EckskGTKxjMqkUmhEzdnq0eybpqIRtvcKNd75aDx6ckM6mUNIOhy8S7l2/z8TZwbUU7t3bwrbbFDLztJtdzs4uKOZnqBdv0usO8MMhI+uA7uCUufkK40mXRGrTH57RaU0plSrUZ0qcnrQ5OxnQ6fT47Ivf50//6AGnVoEXikLmzVX+6E8/4A9/+FtsDz7ih7t/wA9+8Pu8PD8nTvJ0d0OMtEuuOMPmyk3Ozl6w+2LA7Zv3EYQsrd6Yk4ttEgLmGjNEjkDgpri2eZVnL19gixYL69dJhgGrK8ucdI+wJy6VUh0xfUG/JzCetFhe0biytoCm5pDSIXsHTylmMuy/fEwmriL5eRKpx3m3TRyuoQltbPcQRYVh75xeb4/x8IjWWUC5miJOEkQR4kT4Et0T4gcRekrhow8/o9u3qDSWqDQqtJrn+FaB06MxWtrFcy9TdCRRR0DFdX2iKMGe+iSxiOc5XybtXJp+UhmVF48uWJxdYmV5Edf2UBQNEP8/iAUQY5pj5mo3qVVLvHiyixjnIJaJ44gw8i/B5sBR/yU7H+4xU6mSEqf8k//rf4rY+DnWrl5l/+Bzbm78MvO1O0Rumt/5g/+Ebv+Q4+MeklAga6wwddpMpsdY0zGl0jJvv/GLlAtrPH16Tr26ga6l6U+OOW29IJepc9J8RUCfer2BbGg8+OwzRNIoYZ6DJ3s0O7v87o9+k48/+QDDL+BYA4IkZBxaqN4837/3NQxDI9A0Ou0xn372AU9fvELQbX7rd3/GxAnZvH6LYq3A05fPePTpJ5QKRX74Zz9EUeuIQhXXTnPnnbewQ5/Y14hcib2THUauzI03b5PNSHjTCXNLs9h9l8hyWJmfp2zU2H7ygNFJl/dW
3mJpLgeByExxDc8F1wvRU2lkFTrdY168OGPn6IwXh2eEks1PP/8TXFFEkGaoFOYYThzG9pSXBz/j4mxCY2aJai1HQJ8w0Snksjx9+Zh+ElLN55F8k9SrQwq2yPr6N3nrap5vLUl8bzVC92M6HZkf/uCnCJZKPV/h29/611jbnEVPZ7GmHvNzi7x8vM9gsMfjh9tIioAkguMMGY4clpc2uXV9DXM4QYhEAl9kPLE46Rxy1gxYWJ3j5bOnWB2HzeUcp/svyepVUoWQVqfD8+fbFFOzTKwmj54c0Wte4/7X38GLA9568xsUihnu3FygUY8JnSGJkMMLHdAu+MEf/gGtizGK7uCFIoXyDLP1WQylwKg/oH3mo2kBjXmDTruPpPgsr14hm8sxMbsUKhkmY49CofBXLyh7gy7d4TGd7gDPTZCNLjOzCY4z5ej0EYnexQ1jYjHHy50zDs4u0FN1hiOZ0/MxuWKBwbBPMVfHtqbkszm8aQMlVSAza7KyUaff8+n0fI73YzL5GoN+gKxqxImHoZWpVzbQUgLZzAyzS0WsaYAbdtncWsEKdfqOQ9t6geeHEMQ4owvahwfEk8th771nJq+em5wcuCiBTNWoUktf4erW1znvjuhPeiwszxGFHoWiTOv8Ansgk5Yb1Arf4mvfuMXPffe/xXvv/wKxArVaDV0rcvetRbauXxLtJVHH9wTiGKLYxXGm6LqOZqQQZRnfi4AYPaMSRDFGKofnicTxFFlVkJQMhXyVWrFGKVPg3q0bLCxWSBtZZFllPHIJg0uYaej7TMwhpXKeKIoZ9KdUSjWUuIBvJ5jWgHyuhhvY9HoWjpnBt7McHpySLnWJhVNkWcTQ8yhihpu3GyzOzSIKEVevFslqBoo8ICXVkaMU+bzEeBiwfqNAFGZwvAGd8wApiXEch/39R2i6yOHRCSdnL4m8BCnMoUkGY+uQfCpHKb2ArlVYXK7RyBbZmq2ydX0VQo1irszOducykWeyT6d7wWh0ysJyhoX5Cu1uSNfr0Xd7/Oyjn7K/84hyKs2gd4qSLfD86JThdMrZQQ/BiVlfvIYUZ9h7ckIlN4uiBFy5eosgEfnokw+JooD5uWWmVoBeUMgV66TUBcSoiGcnpMUs1fQcSqAyMn0ifBw3RtFCBLWFKCboYoVKKYesqSyszaCm0lhOQLt/xmAyJEEkSGxULY3juFiWzavdQ6Z2wO6rPU4u2kymMa3zAyyzT6t9znBiEiUeMR6xAFYUsbp+nWfPX9LtW4iyQK8l0b7wSGUNTGv01ayiIFy+e0kifIk7iYiiACF2SMIp49GQ+dkq8wtLrK5uMVNbuuQJhlXiqE+t3GBzc5ZsJoXnWhCpGLrKcuYK4Simc77D5mKD05Mjfvb4D3CEAX4Yf/Xdl8ac+KuD+KtDOYKj/QH/5//sPyCQztBSIr7vY5sOSSgSjSTOdn0UpYgbpRhOJVbXr/PWa99kY2EJSfHxfR/NuKwcSaIIScyg51OuZNjd3cWyWwwnB5RrErI2wfcDltbSaLk+iqQSRBOSxGbcEcllGpQbEpPJmGKliiKL1CsNiLPcvv0GJ50PabUmhJHP5tUsODIZeZFBN/pqSVpMvjRHd3BOqZEwNdPs7T+nmJ+n2TqiXi8gyh61uRhBjJCFGudnu/R7J5wetpDQcGyTXq/Hs0ctLk58zo+Pub5+j8kgod1uc/3GOo7n8mp/j1iI0dMl/nh7l4+et/ng0Qzf++/+L5hfuorqLTBtebTlPGv3N7h2q8Zy7R3m5maQtSqPXhxQmanz/OWf0GoOyeVnyBU1TLdJb/SSD3/yY4YXAf/+//I3aJ35vPnaOwxHCSfnNu5xhLt9xvbZCd3ec0z7OZNRm5X5TXTVQFVzPNl5SGsyYdQVqaYFHGfI7OYm+VyGsR2xUb6Kpkv8+Cc/JGusU8usU86tcvvuG/SbfVRFoFxYY3G5jmkN0QwVAQXfi5HkP4+7FGl2QrKVCs/39zhte3zj21/jv/ntHzC2EiIxj6xKREmIpIgkYoAsGRgpic3rRRw7IJ1RCUIHRJcEH12Dzz95RLs14uWLB5d//2W605+/v5J06fTOZDIo6SHbu18QRQm18gLlYomzswNkWSZJYmQxYeL0SZVnkbUcajaLkXf4nd/6x9z9+vcQyfHowSuKhSV+8Gf/O/aPP+bK1gKv3b0UdZEnM1O9xurSHQqFHAgOL14+4/ioxdL8LerVEsWKQGNmmUqlxOJaETewkJUIQWsTmQE3b11HxKeYFxmOWxSrM+gpjbVr86hlqFUWCHtj5vMzfPPtv4bTNXDHIwwxplw2mJ9bpT5T4un+v0DOdEnlE37ywU8ZDmLe/tp7lPMFbt98h7//P/i7/MmffUSzfU5tXuT43Mc8T7CcEZqRZ66e5eWLI2KjQX7mNrV8jU8++Qn37n+freu3OTt9SiFf4hd/5a9TqlSRfInW8YTQCdjauEEiOHixx+L6DCmjTLV4izPnFKEWkJ/PMnUmNGYWkVSJs/YZru3wbO8VghxgmS2GvR4Pnv2Qn3z8Kbfv3eD84iGmPSSJVdaqq3zy6AvKjXWS7JTOywfs/fB3+c//o/8KoRcwlx2TSmR8J+C9r7/J3/xbf4vbb98lkTIsLb4OwMHxS4xMls0bW2zeqlGqD4nDCNt2mU5NehcRG8s3sIcp5uubbK4uknbg7eurHL78HHsyxTLbhJGDZmQ4PW2j+iFyDKLs0h1csH5llpe7L0nl60S6z8gZ8vhBxGiY4pPPLlmqrVaL3oVP67TLy1c/YefVK1xbY211CS8Y8fLFHrOzc4Q4fPzpT3n6/BlT2yQIXKJQ4MXTU2QxYffFIZ5vkUQiE3PAk+c/YThpkk5n/+oF5cQ7oW+9xEmOuWjtMBgf0OocQiSjy9AcXCBqeU4vppy2u+ycfsbLw0ectJoMrCnHTYFXJy1enT3l937vR1imS6v7Oednx3hmnsE45K1332R5eZnD4xadYZeJN+TF3iMSfUq+uMJF65Sl1UVMC0Rlgh9YEAsc7D8nCA5RjAlJUmeh/hqvbd3nylyDd+6+SSat8+aNt/mVX7vBzWu36FkCjw/3cTWXcnaeTDLAaifoQQWzPSVvpPn4z55zc+tbpFI6RtZmYlkgTvkXv/t/4/MvPkKTqxClyaRL2FOfMDYpltJkczqpVArXdfE9h6yRQ1V1ghAkFYxcSK22TBzmL7NiEwE7cFG1hJWNZcb+EYF4SqEAumrje6fkSyKT0YDpaEK5UEcWVVzbJiFiMhkwHdtUSllkSSCtG5QrOUrlNKomoBkykixyfn7OyGwzHNt89NFHZPNpRgMVzYCFxTqv3XmbciXLdJwhUY7xbZNvf+M9SsYGmxs1YiOi07FZWZohsUMGF10Er0S1XCQWZErVAr1Wm9FgQn884ZOHf8ZsfYaZSoMgGZPVbRw3zVlnn87pIeN2iygWUAvnCMGIb37te+jiEpFgMRk7lLJXcdwJ5UINa2Tgdpqk3DopBGqlHCQBiZDCTTRmVzaYrV9DUvOUZ0RKqTzrjQYLs8s4wZQrW3Pgalzd3CSVtak
0Snz/V36J1SsNptM+S4sl5pfmSadFgmByyRETBAxDIA4dGvVZMsUqgeijplQq1Vl0LYcsiohCwqBl4VhTPDvENidoskbipzAUnTC0UIQUhUyaUqGI5wbMLy0SCzHHZ6e0OkM8V2H3qMdB02XnosPEDkhiCc+NUEQN/ALPX+7QmF0hly3iuxKyovH42XNarQuMtPzVAXj58BfVFSFJEJKIyEvwrBgJiYPTx/zW7/9X+NgousZw6KCkHSxTZX/vGVLkIwQS+XSZ5cVN3DAiJMCcpPCsIl4QE4YqzkQncKfk0vKX82bJV/uFKIqXKIsvP4aR5o//+I85az0kX8gw7Hq0LobkczWSKMZxJTq9Y5oX+8iJTk6bwzZ9dnf2qNXWWJ6/THHSpBSFrI4WN2id9JmfqTMeDui2zxmPx6yubBCFCUeHbRRBYzos0OuMmJhTVmc2CSebJFLIxdFj5KBKpE4oZWpoKYFCeQbbl/nk4w7tnsny8iJaLBB0dfIzebSsQb8tf7Vmq4sIwRQ5BlmQOT47QpIC+qN9iuUU44lNGKpMnQluMGT/4JTNzQ08L2Cutsm1tW8ghAVyORVBHhPFNlev3ePJzp8ymvTwvJBX+w+wpk3eu/9NvnP/7zJTz3Fr7Qqzhs6Dz1/xL/50wMcHEXO33uPIS+NRoH8h8Pj5h6zcy9EfqiCJXH9tnmq9Qr1hcHK6RyoTcnFxQevQoJxbZWXhNreuvc6Lzw8JApPmXo/GTBrf9AhinRkpxTuv/xLN1gW+JzFo6xSqLqoh8uzpOZ2zDBtra9y8/R7Xt+oUMhmghqZmwZ2iSSJ+CHeu30XSVklrPrl0lleHfeJkyFx9iUr2Bq/f30KU4kvyRZx8KQyFS8SQptDttbhodjG9Qz76/LfxIotPfrZ3SROQZYLQQRASgvBSZJkTh+WlEr/2G68TBh6+HyGL6S8zy2UURcNxQk4PLMqFOmEQoCgSkvTnFzKBwA8vK++RSLO9z8bGbfK5ErZ3wXA4YHHhGoqqgJggIpLPVKleWUCRXcZjj0gX2X7yE9odjW9/769RnU9h+S0OTnfRtDxhPCSTVnn25CmHZx+STVepltcIgoBqrcj5+Qnzi0UKlYDdvYfs7u5hjhQsd0xnsM/83DKeIzNoZdhc3aQzOiNIbGRdpL5aRpDHmN0Bs9m30MSYSA/5lb/+t3jn9mtUZxRmNysUijWGLQfXicmmGxSKGTxHJghdAq+Mkc5cwubFDPdu3eGjz3+f9kWab33jOzRmq/iTOZZm3+I3/vW/y+L6FXrjKW/dvM/3v1OgO/wRhxcPaKyWSAY2P/rRj8jm57nz+hU+f/mn/OCPfoooZ4hLCndu3SOKTDqdDrdvvEe1ukl/MMLIRTQ7z7h6bQ3XyqImKmVdxG/JzJYXCMMzTg7OKRRUwsgibdQxchNGo8Fllvtoyq3XrlDQMmSLJca+wxtvv0l7YKFW5/jtn+3y6aHP3De+y++eC/yjn5jsNiVuX7vCb/0//yX/+J/83/nD3/1D/svf+j/xf/iH/ymjwZA4cfj86eeYkUUqncWLDrh1e5a7r3+b2fk32Nxa5tmLz2ibRzw9+H2OD08ozaY5OWuiUeLN19OEbp/FxhL2tINhZCkWy4i6ycbGGu+/9+uoco7N9Q1mqxtkjWWmQwchUkgZIsvLy8wvrxIlGS66DrXZK6wu3GJzY4N6rUSpVEDXU8wvVDk7veDiyGJhZRktpWIYNYLQw55GzDWqBG7ElZVNRv0L+t0+GWOOSrWMH/XY3jn8SwvKv7TLWxHnyGUj3KDN0ekxq8vXuX3jLZ4+vOD44JTa4hymNaDTO6M7OKdYjej2BtiOTtxL2N4+QhRimhc2jYUKERq9QYRqdDGPTTxPx/N9BoMm5ZkyrfaAWBjR6zisry2SLR0z8dp4pwGylJAr1Dg5aJLWC7zz9dfoN6d88fwZy+sFLprPSYIGhCUePXtJtuCRVme5aJ0xjQeMbdi6tkLkqQhyyGg4YX0+RSFbhUgBwefOjSUSMSBd1oAyjx8/Q1ZtljeKDEyLOBpSrld5tX9AkrIolOd58fwVc/MVwijCc0VSRp5ISpHKJpx1jhATEIUyrhPQH43JZCVEJSQMfFS9zM7uPq4/5NrmEr3hiMXGAufNM8LY4/q1NU5OLgh8FTe4hANrWhpVkpFEjdG4y7XrG6QNhWc7n1IozqCmGownbaJQIZNWiWKPVCrFdGozmjj0Oibj8ZDV5U2ePNljbXOGjWsxjx5ZXFm7zrWrmxDpaBmXv/nrd/nB7/835PJpDg+7pLMi7771LV6++IIwEmi2z4n8NL6l0e62qJZVps0OfjymVouJR2WG7imFWo3JdEQuG6EVJY7PHUraiPPzV1jjMUvrJbyRhJZOYbsqf/07v8r/9j/+36PndFbqEXmjDFaGUe+ApbUqvVYLVcwzaPdIKQlrS/NM5AHXr73O88NdGosNYikkGNi82ulSblSxbYf+wKaQVzCnEwajLJJsoogBttOhWpmhNr/BZDqm3TvFTcqkFIHxMKbWKKJrWVqtCZPJgHwxxvNkCoJKqaQx6k2JHQ0hDJmOJiBElOdyJJ6LoijECPRHQ2zPJq1niSOBMEyww5hY6HB0bpO2Y0RBx/cgkX2SOEZRMpfmCbtLtbBOu/cIxAR/CIGf+QrCLCAjSSJCEhKG/mVmspAgSx5BIJIofZaulBkM+qTzBS72XiBKEZqsEIRTdLnB4asm9dkiQZCwvfOUdKrAbuspS/k7XLu6zvOTh5hegirIbK1dZe/JNmnjL+C3SZIQfykq/7zaEyciO6+eUFrKMu7IeFaCphURkxRR3CSaTFBkneFIxrMOGfnn5MoKGdnAbpmMuwGBNyQJ8xzuvWJ5PsXG0ha9zil6SsCaBNRuVRmNRgyGAdliSLPZpOiXuLH2Tc5OTokiDS09ZnNlg1Y7RRB3CaOQTnuEKw/IqinOzz9mufR1FmrrOP4eaV1AjeY43H6JoMgUCn+RaxsEDoViEVkTaLZGrK6uIaCTSZcZjbu8OvqQnLFAKb9OKeeSMxKGvZhf/ZVf5aMP/5hifp7XbnyTn3z0e7zx+ptkcym2jz/H9qdksBmMzrm1+TXESOWLDx4zbr/k67/8dZ4//IRi/Ra/9hu3yEkGp8cJx/tdNPUKs3NXcTybVm/E072HaIGHKqVo9i3C+JSYkF/+pb/O3t4uhhXwzlt32Xl+wObKXW58t8SPfnzA4mKZP9v+fZRmmbX11/jiwQ5iqsbBy6cEXoFCbo0Pf3rB2lWLgCFXV7/Gynyd3uEx2dkVZOo0D56QLgf0+he8vbzF73z0hKWNBro6oT1+Ska1OD44IF+fJSDkvDnAvPhDBClE01QEdBAdEAKSWANiND1m3Pco50WOjyN+4f2/iSR4tJsd8tkMUdAFqYAYK8RJgKaqJHHA6nqD+YU0VzZXaF70UFWVBBcEmSgGSPjZj18yv5RHEMHznC/5kwmCIKKqGmZsUigUiVyNbCHLvddu0+rsUMjWuT+/Qfyv/glRGKDoOqVani8++Ji1tTkULcbvd5GNmJ
9+/luErk6QDMmn6hQKZRKhjWvOMxRe4YU96nN38AOHFy+/wLJcGjMa128tUpuVeL7zmKmVYqFYxwmfMRr3KKpvUq9scHpygRtM8EO4eed1xt0R2wevmLg2K0s1Qi1k0mvhiTqpFJwcTrCn+/RebhMHFge7Ld66eR8tb3O4f8bcXJ1CapXm+QEjfcRbb95n2B9BouD0XTav3OCkuYPT8ymXFnHdEa+2H+HM5Dk/m5DNxXzx9COqVYGpFbJQvYY38FlevoZWMAgmh1ycjVlevoboOmREj74d8aK9z9bVG/RGHUrlKoVCjoefDfn5n/sG3fQ5vt1haSZEU3QKmkbX3mN3O8Xq2g3y2RRCLDGwBlxMHHKywFxlg+9/Z40gDDk+O+Paa7donhzw4MEeV5c3KRQNnh2dULx7g3p1lrE1ZRolaNkqi1eKNEdPufHGNSzfYrd7TrUyy9tvrDDq9xAVCzWtctLq8cUXfdJyzHj5OftHP2P1WobVKzoTs8/X7rmMmvMMj7JMIo22f0gxO4OcWqWQ1tjb+xzPnjLtm9TnVoniLI8e9kHeZ3DRpViSyWVmiL2A+kKVo+YD7lz7RWTd5sXuAyrVLIYO2XTAoD9BUxQ0TaDb7pHNVDg5O6HVHLN5fZYgvBxL040cqpYBbExriOeIBIGDphQRBBtVE9nfO6A+s0jM/x9mKEuFMs1zEzVa48bmd2lUb/F7/+pfMnZfkC+UiOI+veEemuqT1vN0ziUujkVGfYu93WPkRELG4Oigw9FJm4ePdzk4OWYwdnECFynb5KPP/4xOe8RodE6r1QR0bt64zcnZIY+efsB45DIc9DDNMc+ebqNqRdS0Qacb0Ro6IKlMzDa6lscwioxGJmnZYS47g++d8OHHzzEnCY05l8mpS7/d5NnOMwaORX1xFdMJWV66SzFbo1TUmK0scvjMQYtm0Q2JIDQ43feIbHDaJfptC9eLiAKdiWlSKV8iEwgUEl+l153QbDYx7S5zcw1yuQyuk+CHY3KFS66ZIAaomoGYHyGoDtE0xcNPznBjl7PBMaVKmVxaJQhdvMAindHRDQXLsphOXdK5NGESIYkQRi79UZ9MJkO1UkORMqQ0nVzW4fU3r1IrXQNRYm4lRbvfQ9F0FMVgMDxH0sf0+ucEsc/WxnXOzl/Qau1xZatB62JI+/SI9fV5DEND13JUZtI8ePwB9tRk+9k2qbTC1Zt3eLG7hzmaMhqaHJ91iSWDwE9hFAp8573v4AwkGisBpVKBg1cjxpOQtFqm3xpQqyW4tkgmq9Du7LO18RYf/PSPLtu25gQx1FGUCk92XrJ+dxUpY+DYGazpMU5wSkqJiVwZrSQyCSyCYEw2JZMt5Zjf0mnMa/j2lHG3T6/f5uXuOVMvIApTTGMFJ9Cozi6yvLaK7cactyakMzWKxVlm65tUZ+p0+z2anWMyuZhcsYQf6tiujRcG2I6AYogkgv0VSFnXDCZjH8e2OT6/wIpCWuMBqpYml6ogxSKxbyGTELsirjkmcH1czyEmIE5E+pMhQiyip0MsU8DyL4gFEccfY04cJDF16aoWZERRQhQlQIBEIIlikigmilW8cEqukMYLNMwpPHrUwpqkkMUc7XMLQoM4UMkVsrTOPRy7A7GPTMJicYWikieyfHxbxtCLlweuBGpa+8qI8+dQ8/j/re0NIIgGrgenhx6d80vIeqNWpds5ZGVlBdPMYo6gVtFISxJamGF98R47L7p0xjvY0y6L83WUuMrNrdfRNYn52WUmgwRFqDNXeQN3ql3OsXkaQqhz7/o7hP6E6ciiUV8lX90giCacHr+gVClydjbENTukq2PsSURgScxVtmhU51BlFVWeZ37uLfSiwK9++28zk5/h6vrdr1YsTukM27QuTKZWD9fpoSVV9nefEvpQK9xiMBhRr2e5trlFLi3SupgwHRu40wRVVLl19Trvvf1twkDCnOgcvjpEDAtk0wWq5QpiUmJ96V1mauu8842bPHl2QjDVmE6nfPHwJ4ShiZyWCBG4tXgFUdAwUgpxYFHOLNI9dcloaWRRZKlxlzdf+wWSwMAcRtTzt9HEiNmFkKfPP+XJgwEoJsf726ytLLNYv8uPPvqndB2HYEZEEkZc2Vrh6++8y/UrNU5fCNy48k26o2Oev3jClZU5JG/IycE5lblFFHUWkgLn3X1GkzbhuINtR8w0RHaOz8gV6jh2l3v33yRbTKMrPpmMSKGYwnMjVFVFEBNIJGT58sIkKxH98zQzmXdQEp1PPjiiN3ARJRDjNFEgEiWXhjDPETFSEnOLWdLpNPVZBdM0iRMXQUiIYpcoCkhndQ4PO5yfjEgSvmxx/8UMsCBctr49z+P2zZs8fPAp9jRgaf42xWKR0bhFGEQosk4URQybIxYX8gx6DtdubFAsbnJl/nUUygwGe0R2ikef73Dn7lUsu4+aNgkjgWK5yEXrFa3eUxKlS7VaZjqd0m5f8PnHezRKt5ht1BAFGdPqMbUcQpqM7W38wEWWQwQ54MEnH2AOfHLFDd577+cRZZehfczAamGZh+SMAsfmOY9Pn/KTP/kBjdQSf//v/gryzPTSzDQdYw5disYqv/y9f8CNazewJy4zMwXW5kp4pk2vPaLd3qOQT6Mb6S9RPBIT+5xSPsXiXI2ji22e7PQZWwLmxGXvxR6vxi948OQlP/zxD1BTMq4XUqkv8upoDzmyKNfrFCtZSuUsw+kxdrDPxtYMspIwtk7Z2dtDM1LIukq2vIKo1SnXlqnN1Pj44xc4Ix93nLC0OI9jaczP1dl9sceTx4/Z23/J8WGXMFKZqxZ4cvgCdxSwtHYLIchysr+LhMy1pRmWizkqhTy7212KhSxC4JFL+4SexXDY4vikSacZglelmM+yvnIFPZXj2ZM2Syt5zs6e8eGH+xweOjz9vI07KvL9v/YtSpJOI5/j/GKPwxObpatbpMozqMYqgRSzffqMUBpSrnmIScI7X98gnTZ4ufMZIg6ra9e5ffObmO4ZpxfPaFSWyRlZhu0pKiqVSoWL5jEnx2OyRZVm7yV6Wubm3XUaM6uXsbVZgWrdIFtI8HwQk1Vqs1nO2z1sJ6BYzlMoaly5soVMlrQR/X8Lwv8fn790hXL7xUdEoUgptYKacXnw6GeUCnMUqwKnJ+cYWYuUXKbXjXBsEAKD2I2IBZdKYYahbWLbNkKYwfbHlIopwiBicDHGSZv03REz5TrZTA5Z1MjqOYKpymg4ZdR3ib1VXn99ncdPPqVSKnE2vcCNDnCGOT779AWVGZH5VZWjvQG5dEJMgqTnmE7OkeIyklPmtbtjei2P8/2IahGGXRG71yWja0R2jDNWCCcPqJQNDk4O0LMy3/1r77H98pAoiihV0vhTge7pENv3SDyFdDomtEVKs2Wa5ydksyKuGxKEAfVqnuMTkyRWmfQSZE2hXE8QyZLPFxlM2pyedsnlU+jFFFIMqzcapLPgSxOa7Rau6yF7Au3xGTONKuPxFHM6QdE0+oMRbjAkZWjk1VV2t0+pNVRUXWdij9nba5JNp7l1t0Ts5ND0AVEgkS8Dss2o1aOQu
kIcihQqCkmYQ5Qd8FPUK3WePHyGorWQdYORe0QcJWyubVLLzPHFox8DMUZBQEglNGornJ9NyBeraEJMLiPjSynG1pTQcslni7x6dIQYdXB6E5ZmZyjrMygZiUalwNj28aY+6RIIyIyHLoguve4Fd1/fYGy6xLLM/umAtSuLpONZtj99yexMHS2TI5ba1HOLzCzMIebW+fzjn2EIVRJ2uXLlNqYpcvDqKYETkJZrSLFAb2Qi6CpG2kQPBZyRwbgb8Vn3CxJZ58qtN1F1jclkTGIoZIw8HbtDd9hnrlYmNAMqBR1RFWlPx4ycMaqioaehUE7R6TQp5OapVmuMJx30dIZQllHSaS5afWoZGUM3MM1zPFIoiUG5nCc5HSOIl7NckgBx6DAcXyCRQ9E8hiMT21Qp1bIIkY7ZTZDkyxadKEpfmmKErwRdGIYkMkS2RhAHtDpNpr6NKqoguJhDiUJOZjya4rljypV5+uOXEK/QqCfY4wkZQWUwfM6zp1Mqy2U8NyZKDPpWCz2EgARZ/gtXtyAICF9G1sVxTCJYeH5Mr+WiSjK6KtHtdFhcmuHiyKJcaDCzIBCJHmJa49rmLTrnI379X//bPH7+I0RhwKBnkysW+No33uDksMWTx8/YuraEoNg41iG7uyK64ZHLzVLOXqd5esbWyjfw7CG+OmQ4ajN1JozHBn74kttXv8Oz3U949WKPYnYes9dD8NMYdptEH1Ksb/D8+JDAcUE+YOPaHXrDi6/2xErlFucnbTqtc5bnV+iOthk5XdRolsSXkeUQRZF4+uRjivoaohCTK4W4U5cbm++QMTI8fPA5I2uI5U3pj7f5pW/8Ko5v8vDZI47PRvzk4vf4B/+9v8fK5puUi2WGow8pLM5RzBV5sfOM8/YZZ/0JhSsxrttAC4YMJybXlt6goN5gdrnA87PPyOd0js5fMLA1UuIWpVyaxcYCtuPw7MEnZPMyB+cPiN0J1VwGc2Axvuhw5+Y38SyTuaVl2ufbNMpv88Pf/22uL91GMXR2nj5nNA2oFVYIxSIxE7wQHM9GUUZcnXmDSeRRqpmksiU2btfoHJwh5wPq9SWOTzuc9A+REoMrC99jr/NDKrUsJ8dc4qeSy0tKEE3J6hm6I5uHj5/y7tcXEI0L/tlv/QAlnSNWpMuIRdG/NDDqYI9c1tYr3H5tjrOzE+7dr/PhT/YRhJggSlAUhSgEWVLpdaZ4nksqlSaKEpJEQBCkry5HSRJTKhXptI95941fpFBSeLn9jLRRIWtZiAKIIsiSAimfTK3IyasX7G0Dmo8giBRLYzoXLmmtyL17OQ4PdikVGnQ6XYLQJpeZYeKeIaQiRtYegeIzsTIU8/Pkc0ts7/8IVaiSKyhoch1VVoiihOdPTokSFzFOMzubw4hmsF0RJasxHA6xxmNKSzOc7o3IVWrExJRK8/yNb3+bSuEaE8ekM9nn9tIS2vUU+VSGo8NDFuZXcKdDht0mmXQRcyhyfHjAzatXedT+HF1VsacTztsd1taqBNEp7X2F4lLAzm6Tt7a+ycsXT7h+4x7Pd/eRazLNkyO2rt+hVL7NuOsjRxq1+gJPdn/CuG9yZ+Eq5XrA2dmEcn2WleUa7XYXa9pnNDQplld45713+eEPf8hJ6wLTv2AutUwwCri6vMbpxSE3rr3G8dljhCiDbYtU51Yox0OUI4EHTz7hztqb1JbmsJwhlfIcn3z4p3z8+RH/7r/7b3O4/4iDziGzjTscnu0zP7OAoJsszS7SvhhjuT0GXYvllTW8uE37dMj9dzfZfnHK/MIWsnzGauUW075PIqaIbZlG8Q69izY/+dOPqaWWeP3W91lbOePg6IztxzvkM1XMYZ+1K7eQBIvTkyNETUASFPr9hEdPDrlx4zUqMw6n+7sUs1dYXavTHyWMpxMcx6A+X6NvdVDdAjONRRBlBqMmKb2IOe3RqFc4OTmiWprFC3p89tkDllcaKHKKVFYhETXyFdg72CGVMSgWlpmp13n16hWS8JeuO/7lBeX3vvtrXJy1GfRajEyT9atzTEYKK3NX0MIzHu1+wvpGFWiRMiJiV6SQAd3YxHT6pAwJ2wpJ5x0UOUMx36Db6iNpU4RYBjuHXNMZTjwyisbJ0QH1hQbb+9vcuXmXKMzQHbaYW1zFdce8ef97vHy5je3YyLLMRV9h4k8J7CnNVp9IDnn9/irNE5FPv3h6GUeYFXFDj5yRxh06GJFGbmEOTRBwxiGykGFs+pz0d7k4GyJoBVx7n3a7ja74MJonikWm+ORmKnieh6GoKJLMyd7FZTatpmPJI3L5HLIMczMFAFr2mEY6x0WzRVqzkEIJL7GYLefJhGncFy7ZsoE17mPGIRIBYaTSvIgoFQOW1xtYXZ9iqkBKkynlC+wdHWJkDAo5kWFzQrmWZTjoE6sK3rjP/MISiRPROhPp9D9ldfUKldIMB9uvkGWHWuUammwyslpMvBqufUS9OoOcFHG9Mn3rnOXlLK7rMxpfICoyprfO0dkx73zze3z89DG94Utu3Fjj0aMHFKozLK9nkLw0giyzOpPjgz/7DCdyqFWbhGmNfK7MyURCjlX6zgnfemsVwfbADEmkPKVCnr2DQ+JMwsHZAY21WZrdKbVGDsuBTAQLs3N8/MEjZtcKyDmLfm/Mna3XOGy3yHohZ5/sMhqNyC/MslS5zdH+GaEUYsegZos4noWapJibr9LpnKEFDgF1FpcbuILPeBgxHIzZfbmLY1qMrC7lNLgyZJQs8+V5StkcyzWNsWfhjiR0TDxHRJElJqZFjEq1ukxOT2F1LxhaUyRJYqZcIJRKnPTHyFkf0wmwfRFBcJA0gZGtY7gQhZexb2EQgphnOLDIpFRsOyJtGOTTEYmtEQUaIQmyKBOEIarI5SyZF1waZMKQRIAoEYkSk3xGIafkiL02UydgonRZmF1m99kes40iulqmPzzi2tYbZNN1DnYf8PqN+9hhh9YgxJFjrEl0KRAjHS3rI6ZCBEf+MqIu+dKZG5MIMZJ0yfNTJRVv6jK/uEqh4vHJT464e+86YiyROD3CYEzkG6iawrU71xlNhohRxMnRDkkQk8/PMjdbxbEEAkdCEGWuXn2T/ngbezolm5qj3+mwtjGH73oEU4GjV/vI7gwLKyphd8y5+YqZK3WK3m2ePf8ZFd1lPTXHg4FFIqfIpbNYnkdtIceTpyaK3kIRxxRKVZ7vHnNbk6kUil/tiX4k4RJRXp4nV1E5Py4QeVNWbudw7DH9sUsxNc+w10TRxmwt3SSZgpSInDVPWV2eJ/JGTEavGAchxZl1FlZe549/8LtUjbe49l6W9niXfueUxbm3kZmyvlUkJVQ5n3S4uf42P/7wn7E5u0KqU0ZPC5QrBVxRIJwGzOXzWKNjbizdpd++YHbxCue9Zwwmz7h77R1kyeVnH/8OGwtzFGslOn3Q8zUCOyQ/u8z68hK7xw+wogrJWCKJV4iVEUp2luP+PnOlGlqcJ+106Y+38a1FklChPepRr83hJRat1nPGYY70ZAbjTsLO0yYZY47FQh497XB+YVIoruIQEKfyLCzeoFTq
p6u0ngO/RGI/JKldVWE82A41d9JF3BmwQMEal1VAJfYhJekkh5sjjEt32ieI4sGRQs0IsWhwdLOjtlnEVEJMX0j+dkUUROscgckPIRYi7is89esrplUsw2ePz8HDufcaPepSJLfPI6RdNlqoU8xXWDzq0G/+U/fMHxa4Hp1KfWrvHsyQuePR/zze/fZf9yzMqOya3rNfoXArHRJbwMMXNlxGBGp1SjOzC4cW0NRco4uugxncbImku+XODsvIeu63iewLPHYwRRI3ITlvMuti2TJhKiIFCwNAInRYnLJIJL73KIkStxcmwTRi7LRcj25i6mnmOrU2Y2WyKJKkJmkisucAYKcRihVl1yhoFlKQiZjue5qJrMzZs3cZwhhXyD4fCSYdLFzOksnUv8MCHL6tSrNaZThSy2UPIyaqCwvb5OEAdMlkvW6kWW84RWu8JarsTh8QmaqbDSqOLbS5ZLh5WVLYp5nf5gwnzmU7Jgfb3F5eiQRq1JtVCl0biOH8wZz06YzXx0XSVX0JCEHLOFDalGo75ClAbIssRoOCPwHEhUwjAmTiOCSGc6c3j1ekIUZYhiRpYl1Kol/uzFEds798kZeWTVQ4lDYickjFOy5ZI7tzfpd33ETCfwfDRTQlGUq3Gx65ImIqVS6atOsowfXJmWcpbKfDlBlnKUSqV/1ZkBdF0nE6/YfF4YEUYxK9Um0+kUACOXI44Tkjjm+KTPq9cn5E2DJElQFAVVU65QK0CSBNi2ja67SOqMOI6JoghRFBFF8er1yuVYLGbIsoj8L2PwJBnXDYmAOIrRZRXTsHAcBzd2cV0XZ6mzmPtUa3nq9RKlhoFhygSBB4g0V674pk0/h+c1ydIrxJZtO4RBjO0scNyI6VxhYfuomkgShQiIxHGMJBqkSULOUhCjfzXyfvrsGZquoKsir199SaWWo1xTmc67fPn8hEJzm9djkZ++PmIymVCsD3lw6xaCoDLqHuM5LrVKk73nF9x/8yZaPiZ0FZqNPIu5z97zPUoNHUWuIesyHz76Eqms88WnnyMuHTqbDXZvrfL//H/8N6xUGmx2OsyHGcmyyWww4uhwxM76NpP5gFqzQk4NOTkeYC90imWJYX/Epx+/wLGXXGRLWi2fzW2Rbu+MJMnY2FnF9kIMs0ahGHJ+tODBtXuMgwvcWKWSXyONNczSJq9Pn1I0VvjgvfdYLTVQdIFHX+zxxps5FEtlNp1QqdwjDiAJRmy8kSPyVKbjHvUVk5/96AUPHryJ5y5R5BxZJqKqKmmWkiQJoigSBC6yJhNHIkkSkSYSfjJH02XuXP8etdKSxXzE5WRIsaXh2AKSFrC2dZUr31qtYe1sYJgp7XIH2z7FUMtMIgVBSZCjGEGUQRDIEMkyEATpl2EBnucRBAGBO0LRZFJJ58HNHYRMQJBOCHwXwzRYujHru29xfPqUJIl5/WrCzu4Gp6NDTk766GbCfKby9Q9+i95wwb/71/8O/+gf/Md859t/g7/8l77J6/2P+e0fvMn9+/f54z/7PSTRZKUmkK94eAuDrbW3OT66oFW/gyinHB/vUSrlieOMJFHJUpXReMn61jb2MuDsfMIbd99lPB6zXEjIag5NaxMkYz7/4hG1aoNafou723Wm9jnTkcN793+X/aNfsBgr5I0GOVOnVDKpFCTkTCYMfQQxplmts1xOEaIGRDaaGVGrVOn2FmRpgO/5jEd9SBNu33iPP/3JT/ng6+/yjXe/R6u2ixfOMAyBZ188ZX1ni0p+i9Udl3/U+xGWZfHu9i0effIKqwSD2UPa7TZWXmPv5SGdzjqOsodAlc7qLYrFIk+fHJMmMk+efcbalkkUZpye9di+0URalNEljePXe8QhzKY2nXYRRS9TlCr48ZKDVxc0Gg2EMMBxHFY6BexlcOXg9z1OzrqUyjmW7hn2XECTOmxsb1AuFljMl1RqVSqtAk9ffEboWuxuXGM4uIouTDORTLKJEomt9Rt0L/rkckV6FxPSJMBS1/nmt9aZXCpcv1ugNzwCZcnF5SvcQCYKVDSjTcQYSXVwvYDL3ozSts74PCGOMrY2GgShy3D0OZNxkXK5DFLCyb6LbIz50Z+f8s4H95hNZkiSSLN1jchb0r8ccPv2XbIkQijkKOQqaHmH58+7TAbBrywohSzLfqUv/B/9ra9lp8djNKNELmeQZQnD4RhBkK7SC8wSC+ecNIYoCoEUSdSJ0yW1ep69Q4eVFZ3B5YxiLoeV01BlgSdfnFMsFtCKInGUUSrWCIMA04TIS9Hkq8o0FXzEzMR2xsiKgZq3vkpDUZjMbMRoiZ+CnIKl5HATj9J6HSlTSNwFjiNgmhCGIZqaQ1Jkpos+hpVDU4qkmYKQevi4OJ6NperUG0VOLy946/Z94sSgd3pMyygxnFzi5zQWtkBZU5gMp2Q1hTQxcfoOuXxMJtgQNTHyLr44Q40tojhDNSUKJYXe2RJdqWFZFmZBJ/Iynn/+FM3QuXGnjCHohIFGLILnLwjdCMtUqHZWmY9mXF9tUTPOOOpdoOVzGGbMqCdw3o3ozedUGmUmlxGKYnP7/jZz32Q5HJI3WmgWbNxQGJ7bCKFAHPpcniRs3apwun/K2kqD1e1dFDEH6YJAkPizn37KSsdiPg1xvYByPWM8WqIIJeqNApZawrIKTKcXDPpzyqU65UqeR5//OWudLSBFlAVsN+bifEGtWuCyO0Mio1opoRsylmXQ7/n44RyrnJK6LSpVicBJyRLIlww8N2ZjYw3XmzPoTzA0g8vLy694oSaj+RRZzSgWywiCRJw4VMpFMkUiS1Rse0QQBKysrOK6C+IgJZ8XUNQ8w+mEQqXOZNrHlDQss8l0OWYR2liZhGzkiKIIQxbxvCmN1Tq6bNKurkJ61VHzoxmaanF+fk4qxKhakTD0MU2L6XxCkI6RFRBii7JV5fmLLzHMEppa4PzykFLNYHuq8h++EAABUVT4e9+ocdmukrfaLKYhE89nPpnjuzGoEuF0hKmNGXQ9SoUVJCVC1WUkUcdeOMwXY0pFC0lUrvJ1lzaKol0Jzq/gzYjBFbNSFInTjDRNCb4COsuyjO8HRFF4tVOWpui6ThzHZFmGrht4gYcsSgRBcOXKleWvDA3ylYM7k64OHCEjjmMSMsIwIMsyJFlA4CreS5ZFIEOSrqp8VVWRZRnXn5ImX3VlZflqFJldXc9xHGOZBVZWWuQtHTOnIUkZQRB8hRgJCUMXx/EQBQVF01ku5xRKRaIoJAgC5vaSycghny8gySaO46BqIGQiWeIjyFd/w78U0wCGpqJrBmma4nseAgph4KHJGsgCmmViuwtqZRMhkRElFS+JUCSJyHNRZBFJUtB1HUGCOI1o1tcZDo4YD8a06h3Oz7toqolnO6xtlpGkgDRRyIkSBUvCT1V2736N0LMJ54c4QczLFwfMIpfeaIkYZwiSSBBLrFQrmFWR5WICUcakq1BqgpkXmYwdrLzGvTfW+fTTJ1RaFvWmhe9lZGGN7//OXY4PxrSbFT5//jmev8CxJ6w3vkar1WDufkFFukmuqjCZBnzv175Ot7tPIiT8+
Ec/5/aNm9RW8uy/uEDXdTa2m+wf7tNcKTFd9CmXWnhuQF5sUakZ/B//zz9ikpgYqgFZQhD4KIp09f8XUybjAXGUEfgOjWqO/+g/+jtMeifMByPq797AsAOOX++TlSNWikWmwwGjQYZMkbXNJnN7D103+b/+J59y2k8RBRkplcikGAGZOAnQFAHfi0nSCFGEOBD4P/zdv8ks+QJvBAXFRDZ1qoMhv/XfjYjjEFEW+Of/1gbF791iMJqzufImon7Ow4f7VCstxoN9KrU1lvMZlm4wmPZIY1it7XA5WvAX/sJf4E9/9E8pFzew8goZMnfu3OHxs59CJiHFVcoNldlizHh6wtnpKXeu/wYbWzUODvYplDR6/THrmzvMZjOOjg4oFWs4C59vf+s3ePjlT9jbO+HmzdsUauDaU0b9GWlU4nu//gMG41c8f3rAtetVJFHjyZdHfP8Hv3P1fUo1Zpc+d+7u8uT5z4nTJctFSBQUKFYSzi66dFpt5tOEW3c2yLKM8SBg7r7isjvinbe+QxT1GU4yCoUYXS6SCQ1qNYMkcBG0jLzRpD/sASIHR695651rHB+O8YM5qlxhczfPs8cXNBstVjoGJ6fH7D2/4NqNdYrFIilL5PgOqxsxP/nZH5K36qxv7OAFfcZDA0kaMu4NkI0i09GAQr7OtZubZIHPeDEjV8gzmZzS7y0wcwKaWqBQyBPHCZ5vE3gm9VWfx5/53H9wnWazTZj02X9+QalYY7o4wXUSgiDge9/9dS5OhyyXNqVa/oqM4b7GszU6q6ss5zHj4QjXGXJt9y6eYzP0hmjpLdZ2fT5/tIekxWhKnU6rRffyCQIV1jdWUPWEV0czbm7sIIlznGGInwj0JueUKiqKFuLaGZ4jUSmt0qgXcdwR5eYKzjJB1UOSALR0je2dHEkU84d/8s+5+8Y98jmL13vHWCWN2SzAypX4d/+d/73wq+jEX7lDGSQp5UYJM1cnCBKieMnNNzaZjRecnUyoluZEExvfi6hUaownc3RDoLnSYr68JF83GE5n2HZC3jAQBQXXC9jc2cL1JqiKgSxHKEpEoWjg2FNyVoEwWDKajCnVK4ShS7FcwXYDMkLCMEazQpoNmfFQIxlHNDfKV90UV8B2phiKRc4yyIQARZFAUCjXqvRHQwTJJEuvKg+FOSBQL1doldpkcYSeCZTkMkcvulhtiVpb5vRFjyBY0CitU6rlmY5OadWqzJIIvSSQT5YY5QpC2ianG/R6XexpiVzeJRVkqvk6vjeikC+T+B6R7+AnbbY3LLizSyREhOkcQZBZaZf42c++pNNZpZhXWAwj9KaHkjNZZA5ffjbHE0B9KbK+dRNVinnnAeztD7hxe4tPlicUm1soVoyqznl/4x4l2SIQfF6ffUkWybjTDAmd3R2LwE/ZvL7L2uYKcaLw/PlTVldMAk9B1ibMpj6eG5OzLMZ9l/lUoFDIkCQBzZoxHC3w3QBNk9nYXOH1/ilWroAsGQwHUxTVYDbJkCWByXjJ2so6vpdwebmHYRiEvoaspUhyxvgyw8yNmc6uLghTvwq2bzbLJEmM484olFRUOUcpVjCUMrP5iFpNBElEUQIuLy/xHAj9iNVmA9efYGomceJwfLLP9a0HpKqHvVzgq6BrFr6dIEYGUSwz91zc0CMTPdK0Qmp4lGs5/GlKpbiOomr0Bj1ySg3Pn2EaBURR5vj4iJiAYsnE8WyyVGZ0fkS1WbmC0schiS0zHs1orrQxc2WG4xlbu1v0+mf4nkyWyaRxSkKGMxfpiwPSsspsHiEZFoZm4jkzpBRCz8dbLlDVHK63QEfC9V00NcZ1PYr5Ajmjhu+7hGGCqioIcoxqKMRRAIKAJEj4YYRlWaRegB9FyLJ8ZbAJQiRJwjBMkiQmTVMcx0HXdaIkxvVdoigilZWrrk4KjuNgWRYCIp4b/nKcbZrmFYZFktBVFSOXw/M8VFlGFGG5nFOtVkmzmNraKrIs4fs+ilzDdX3mS/eK/ZpmhElGmkpkscDStolOz/B9H0kSKRbzmIaC7SwRRZBlFV3LMZ2NcZ2ritt+efqVQAZBTknTDEOHOJkiSDFRJBNHAbIgkyYpWXb1+vzLD1VVCcOAwbBHo9q4Si2SDTJURDMhDEIs1UCIDTIpJcxkiK44epJhEcQRXhARCxJJFFIuF4lFn0xRuPfWGywWC66VtrFtB99XyFVKiKKIm4hMnZhu95LIjVnGD8lEgYMXn1MtFQlTHyvXpEUL33HRjBTbHzNbTBnMBGQh5J23ruM2YvaPXnN8MiX0ZTqdCo8/uyD0dPqXDo8fd9ncLqKJ8OGHjzg+OaRc3CZLXAxTI/BKvDo84uTkBRkuhpUSvJzjhzmevXhKXjNobtfwU5fzvs3p3Ca2fbKlR28+o9HKcXzWZzhYcuf6Lk+fXPDuO5s8f3GGHwsoqoIogu+FCJJIFEUIqGRiSBT6AKSBTrlSYOT8KZfDhHXrLZLxjL3PuqzuXkMoj/FGAVKiEc+esL36PbLZEiE1uPRm+GlG4i7Q8zkiUYY0A666kinSVYGVXKX0REmEVSiTCCrN1jrL6YROe43V8gph/ENkRSEjRjVSXr9+wsnxHNNqEE1i6s0GoTPjN7/3V/n557/PwdFLGrUVyg2L7vkFaXaL1bUS/+Af/jf85vd/g0HPJgjnpInATz/8PYazL3AXeW7s3sPvFpE1kVevn9BpPEAzUj59+CUffONdFMlFVkUG/SmjkYNuFNEMk0p5hYv+ay57Pb713fdwwy6v91+ThAqarCKp86sYy1BFVhacdbvMRzoP7r3Ho89+iD0XWP1ak4NFlx/+6AkIIdXyOkZhQOKfoeVucTn9MxIvwDCaSLKB79uouoGWmhTLEq57zsXpkJ2db9BaURj0L5E1jb39L7DtHmfnDuudVUyjglVOiIUTnj2LKZerlKpFapUqj58/xcjnuZw8B3GHjIBavUgQX3J2NuH0eM7WZobtqRwevaJVzXFjt8rjxx+ytnYNVc+TJEv8IM97771LlPXoTo4JlxH5Uo35vI/jBKyvblGpVPny+ceUy6vEiY1ra9y7v4PnObz7PsxmCx4//QiiFR7cf4+MkHpS4vPPntPuqDx6+AWLkc47X1/n5HxImE5pVXfJki6nhy6lisbqWo3j/Yizi1cEboonZAjiI0afJZQqq+iGzMHrc5qVGtVqnaPjM1xPZzFJKebzBN6EakPCm/rY84ztnWsslmNEIUehFJDJS6b+GZKbkjPrHO4fkzfrzGY+0+6EaxsGQmLx7PN93n7zAVatyBePXlMsCpycTTBNk0Rc/qoy8VfvUP7Vv/VWZjtjkiyFNE8cqhRyZXrdU+qNGq9PvkTBImfquF7CzAnQLYFyxWI0nJMqIMSQN3S8ZYBl6KRpykq7yXi6JIpVsjQmCDx0TSZKbFS5QEZIEsvoOZGcoTMdh1iWhaSIjKczdD3CtcELl7TqNaZuQKq6ZEGGJBXxE4f12i7+YkFKguO5KJaPoKSEjkjespAziTD0qa+ucnFxQZakXL9+ndCbkAkhaaoSjFLu3G/w0aNXlGsG
7kRn0JsTiHNcd8GdrbfRyh7xJMNVRdxghiwqGHrK2XDGSmuDxWKG5wXUqkUUVNLQRsx0siRAFAQqnVWOz16znE6otZpoQoYpVkkkg7kTcOtaGXsR0+l0ODk7otHsMHPmjLoX5AWJ0A0IeM36ztfwg4RGrUV3+BDb91ktrjKcJZg1kzQbkgVzhvsalfIKxYaGURaQ8AiilOnM5u237zEZB7x82aPfO6XdznN8NL/qCsQBmprHdV0UVUBRNBx7yEpzi866xspKiyhQmA5dUiFlPvPZe3HC6mqHk6MBhimzdGw8z6NaaZAKCwxNJWc08KIx9jLCMAxW13VevezRXmteRSxGKo1qmdm8exUJOZoiSRIb1woMu2NyZokbt27y6OET7KWDrEhY+SqiBMFsiaxriAosHJ8sy9i9tsXDR88BkSTRKFclzk+7fPDeu6RJQBSC73usra6wf3BEqPrk83m8QUyjWeN19wWlfIXV4i6REOIHS4qFGnGUohkwGJ5h2w6Vyg5ROML2bIplFd/xMbLmVWGGz2g+RRRBpIiQ+dRO5vzv+iZfpcDxn71Zod+qsNXeZhEInF5OSVJYeEs0SeToxSusXIZpWohCxubmBkEUslzYjEcjbt24Qa1hYNs2IOJ6AZ7nE0YJoij/Mm5OEK6SQWzXw7Iswq+MMoIgoCgKcMX8k2WZMLy6yctfmWdcx0cUBBRZo1YpYeUNAs9nsVgiICHIVztoi9n8ahyuqoiyguu6CIiIoorv++iqgqyIqKpCEFz9LMMw8AMXxw4QxKvnQorneVdGHknF9T1s26VYLGKaOlHocPP2Bm+9cwtJTvEcePH8Fa/3z3C9CN8PQbzao1QVHWQfEYk0Nsjw8MMAWSkiCSFpnCGrGpomkZH8q8NTEChYRXzfJ8sS4iRAJE8Q+QSJDUmGlIrIUo5YdIlTUOOvUle4Qt78S4GtiGDldURJw1k6NJsrTKZzpvMJ5VqZ+XyOKApYSh6rmKKaEnHiE7sqWeCRs2oMlwtEP0DL6VdxhM6YZqtKloi4ywWZpHF0cECzssN/+p//bzjq/5SjkwV7z7t89sk+BdUijmO8ZMHCdZnPIwwzRJMTmm2D1697mFYOS8vjLFM0S8UPEtJkiWZEaMo1NmsWlZU6B6cvaXQsRpMFo96CMFzQ7hRoNreYjDxm8wHrnQ0OXr/CXbr0Lxzu3LpGpVPk2ZOnBPENEt0ii0LENCURIfACFNVESJeMe+eokslw1ON/9R/8bUrVATldwE5ecfxkjhiFmPkO966/QeYGTGcjNqp1ehWN5fCQopGjsSXwd/+3D3n5bIaes0gSCUHwIFN+mR8eBzFhdGUCXC4C/gf/k9/g2oMRh705TjDhve13eBCuc+Pv/pQ0i1BknT/4twt06xprq2/ipGfsvRzywbtv8OLLJ0hijXxV4fTkc4S4xNe/+evYCxd7NkHWCkyXB/QubO7eu8HZ+SErrW10rc7Y/QkrzbcIlgZ7r75g92YJd2YROhphNqZQaLN7ex1dDnj48Dnnl0Pe++ZbTKaDqx3lMME08ji2RLkKT774klwuYzxy+dZ37tPrOZydXpCXi6yu1fCCIdd2b/PF58/I5xqsrOkodNg/+jmiqGIv0qtrzYpRdIdeN8MNLolck2vX3qTbO0VQYorWBnfvbnB8+AJ36VKrlNnavcfTJx+jyVX8LKbb3Wd74xa1FZPXr59SK2/jxQP2n3m8+V6bzz77jK+/930S4ZzPHnZZW29RKJgMR33m8zm1agPHG9DvzjAMA1NvUCgp9IeHGPIuS/eEne0b+MGS6QhWV6s82f+UWn4DUdCotk3kzKM/6bIYSjSbConTJpVH9CeXbG/fY//gc67tvImVVzg7O2NuO5CUaK+WCcME306xCglfPDqn0ZHwwwB7GbC7/k0evFXm4cMvCJOQa+sPkEQFx+/T7Xa5c/sBl71javUiX3z5CavrN5Blm0cfn9LptKk1THq9LrpsIiYm1ZZF6E0pFSxmzhRD1pmOHGq1DmaxQ8yA5cLhtPsKWdGp1KscnH2IItzgzbfv0jvq4TkLSuV1dlfW0NQBti3Qru8yDSb0xmPGg5BSLWU6m9FqtRn0+vwv/uZ//a+3QznqpkhSCS9yEcWElVqT/uUYQysQODNWG7v0Lk/ptLc4v/BYBues71Tontl4jki1LCHKOs2SSSgJ6EqRMPYQUg9ZEElEH9Oy0AMJx3GQNYk48whTh3K1he/F9AZzclbM1J2iig1EZUYU5hj1bdrNKvbcJZNUommOvKTQut7mZHDO5KKPqEgsvQvUnI4ga4ShhJEr4DljNFHHdgXcywOiKEYKFTzfQSHPbLAgUxPEMGB0PiULNSTDZOz1MDsSjXyb4bGA2Vqw9Ep4wQByFVxvTuYrSHIeMVNYzi6wigaQEWcjBFTiCHbWbxPGYy77M54/+5hitUU+v0NqB0x9F30notVYwRif8OrVSxqlNicnZxSVIt/87jf5+//l38e3x1jtGnq+QE69hahJRLMhWtzAG/u8+e5vkjdXef7D/zfbTQ89VShU3mDzWzusbehMLmJOuy9RdYfRNCLIVIZjnzQqYZouUhIxHkYYms4iWKCKedw5KJqOu7CZT0LWNlZJwyqnR10uz1IEJGpNlfl8ymi4QNVkEF1Uw0ekhakn6JpIranTvVhQXS2QERG7IittDStXxLLyNFdDZrOIUklCESSW9pyVdoU0c8mEMpKWMPeOqdRqKDL4rsNbb96ie3mBquks7RmqlifOqSwX7lVGu6lj5iRO9/f41vt3ODsboxkxSQilrRvkdREnctGMIn604Lx3wOb2DufTIxxvQrPV5Oa1LVIlote/YDDaQ81vMhidEmchObNI73yAYUKMzenZazqrZVRNueJO2ilZeEGhUGThLGmsNXj16iWdjsFimCGlOrquEMcxhq6xsZlnlggcvnxMcXWLernAeDonEXxAZDQZAA1u39plPO4iyzLFcolbt27xz/7J73PZu0CUG1iWCaKAZujItoQkKdi2Q7FQJkxSwjAkChPK5RJBEP6yQ6lpGpJ01bUpFovEcYosq3i+T+AnCIJCpXq1ZG4ZJq2VKqoiUNpqc+/eA549fYEgRyzmcybjGUEQkKYCju0RhiGu45HPp+zsNNF1DU1XrtZq0hjbvqII5K0tpjOb6XRGlmXIqky+sIKp59B1nUJZZbnwefTpS+YzD03N8+TxOZ4j4vlLTk8HyLJKGCS4gY+ZUzGsq5jOWIQoSNBllThMEaSrkbsgpghCSopPlAQESwFVVX55JooSzGYZQeAhCKCqBok0RRBSClqdKJwRZSKJ4CEhYsoiTpoSfoVeEsSrNA9NV1BkEy+A2WyILKXQ72KaOSplC3dpIycyChlRErMYp6Rj0Ismvp3QKNXodNZpKxGPH32El8WUtSLbpTukUkaajKmstKmvrnJjJwdpkf/kP/0/cTGeUqheoZ/+vb/zm7xz9x1SZvzhDz/i5cs5C/8FF8fnBMs8saOQBQYD2+E48FC0mFKosra6yWwiEczh9t0GejXj4We/QM5FhGc
OmiBRq+oYuTbTwTk9d4Gs6qRRjGlJvPHmdYajAY1WytraGmOnx8bWJk9fZIiZRJpdwfpBRJZ0ZFkkCgKyTCAKRTICFC2mXGrws5/+MY4d8uvf/hYxl/ziZ/scHJ5RVEtIosnopotl5xnPfdKWjTkrELsuWSojSjKZMINUI00zREkErpBWwC8/XxU5Fi8/fsbN+9vMs0u+fHLJVmDj+SE5Q77qpifHdEcvOD8I2NlZ4/lnPUbOPplvs3PjB3Qa2zz6/A/54R//U77+zq9zfnrOyto2a+1b3Li1YP+5gyKbXJz32N7Js1J9m+PDHrdv3MHzRXxHZ7J8TMW6SeQt6A8V9Iscd67tcO32mBfHj5nMZtjh1b7yauMag16PmTti2G9SVFdRlD7f/s77nHXHhF7MrZs3IZZQFJnB7BU//vkP2d54i4OjLxnPdvkrv/tbjN3HLMY5vvXdXf7FD39GIU44Pz+FtMPOjXXUYpssqZBI+xwf9fmdH3wTFJsstdjdvY2qurx6NWZ15T3KlYjhNGbY9dF1BcuyqBXeIwwcwsAiTq4McbNxysHrU2bTAZVyg8ALuLTHeP6cnFEibxkIok5YrhD4GflcCxiiSmvcvrvBH/7hM+o1m2arxnz5msAvUas2sadTtnfW0fWY2WWGvzB5+8Fdfv7zP6aaN1lbvwmSThBcufMPjj7GsWVu3r6PZuRBSJktz0ljBdf2aLSu8+1vvsn54BXdwYTmmsHx5T7Kqzz5go6QlDg7nrB7q8jl0SWiEvPTj/6ED977HhP7kFAIsOc213fu8hf/0gYPP3lC5Oe5f/ddhCQmTV0sWeLxZxccvBqysbnKXBbJ6x3m0wxVjYmzBe5URcUiTefMJz5F9R1qTYuT4wvyVp1StUxv6DCczjDklNHYZ7b4nEguU7ZWqd7t8vzpEdPZiDBw2Fi//qvKxF+9Q/mdv3wnC/w5vpdRyBVI46sKu9MxEaUI3czz6Yd77Fyvoqh5jEKFg7OHrNZ3KRoq/X4f10vJ4iKlkkSWxLhehGguGc+vdq5c32Pp+BiqRrXS4vziGY3GLo7jsJjMabRMUiFCM3TixGN0IWJaAuAz6susr9aQlQzPCzg8WPLGOy1s54KV8k26vUu6/RG1LY2lEyOLJVTBhGiGYy8o5ppIFUiDjFwsMFnMUUsaqhRR0bfJr+Q4OX7O5uYm034fP5HJFauI3hIn8XE9Cd8LyOVlwjCAWKBYLDJfTJjObRq5Mvn1EqPJnMh2WC3ojALIZTkqDYWpN8BLLAg1cjkdMQr4/9D2X0HSLYh9H/Y7OXfununJ4cvhxr13L3YXwCKQiARBVVEWbZNlkWWVrbIt26Uqu2S/2VWWy+UHlSzrwbQFSrJBmhEkAhGJBbHp7t745Th5ejqH0ycnP5z5ZoHSCx7geZqv6puec7p7Tv/PP4qZTCTBZDng+uYqCgYFJse959y4fYt4oWNVl5z0BkiCQCpELEYB9253SYnwx6WfbOdehbNXKYvwjEKcMBvKvPP2e4ynxzRqTQ6fLKnXA9odlSBT+fLROav1der1Cq+PnzE5Cak1LOb+gCgQGfRnVCstGi2Vly/P8TyFa7trzF0XNbURnTG20kJrJthqm5fPx7z7YRc5tzgdvEJRq/RexwTJlA9/epPp5Ig8qDMeuQhiQqWqsb7eJS6WLOYustRmejFla89GVjRcd04Y+Riqjenk5Uyh12V7V6Z3MmDYG6OIBopqkUsB5AZR6lFvFTjmTeYzD83yqDfKVZHRhcT9d2qI2Q6D4QnD8YhWt467zEmSBISM9cYO3/72t7l+YwdVdpi6A2bevLwlkzWySCIXFqRJgWFYhMGULKpSa+Wsr9xlMewTL3WKZI5o6dRMk4W/pNs2GY0zRosBrXYVsoL3iwr/7h+8pshL1vDXvtLhwLGo1CtIkoEfyQyGk9LHFgS8enWEocBb77yLZVkEgYfnuSSJR71e4dr+NnEiMhqNkCWVLCuIopgCSPLS7yMrGqIolms5eY4kKaWMfbl4UxQlyHrz5XleuVxSFGiaRsV2UFQBw5RRZYE0zXAXAb/0i7/KfOLzW7/z/8UwDFS1nGZUdQXD1FBVhSSJqNVUTNPEsixq1SpZltDptEjjmNlsSobGcrlkufSJwhgvCEmiuEygaxp5nrKxscOf/tvv8ulnj6hWasRZTprkKIqCYelX55NRekSTJKFIM1ZWVkizGAGJIChB7p8FEXmRUuTCFZP7ZwFllpQ+UlUtOzTDKEG+fJ7SNMVQFRRdu/Keyqp8CVZLL6kgCBS5wHy+QBIVbL1MLWuaRpIVl57WjDhOS/+3opapdMtCpNyZztIYXRYpkpDj4YhOexVZVlA0hVa7Rq1qM5+PURSFaa/PRX+IXTU57x8ThiFbWzv8xE9+gBde8IOPvyArFrRaBs1ai/kwAV/n/PAMGQh0lVqjS2/k4schcTjn5vY1LEEmDce4UpWD0wsa9TobGzovDp6ysbrPsH9BpbvCeqfCbOoiiCmRlyOrJpVmgawJ9C5mRPOcSDQ4vzCwzSpF5pPmEgkxmiKS5ynJcsn4vEchJWSpxEcfbeHOj9jY3uTpp6fU6xX29tbRrRBJ82m2dlAMi2XYJ/Ua3N++zdNPP+WDb9zl//Sf/SaH/QWqBmKskcuXRExRADlpmhIHIaomM5/P+eZffZv9uzLraztYZo3jwe/j/+GM/8PRFnHkk+XwL/9mnce6j+XA229/nU8/fc3p6TF7ezvs7zU4O12yv/MOL5+/Zjp7ycb6Crd23yFKTnn45ALT0smUKUxMKisdBqNHeLHJj33lHV4cntC/mLFzXUKTmowG59hmF1kTmbtTutV7dNdXefb8e4xGPmu3LV4fSXSqCm3DYTaXqNRzNKvgB5/8MdXaDW7duM4f/d63eO+tt6g1a/RfHrJzt8bpxTG60aaWm8xcnZ1b1/ns8UPW1uu8fPCMd9/e4+Xrzwgjhf1bbyGKNidHh3S6b+FIAaG/IJ65LOIL+q5LxW7y1fu3efSyR7t7DctQcN0+B69PePf9r3By9phxT+CDj24zGJ2zWCzY3Fon8RxWV2t8+fkPiIs5vqvy3lf3efT4UwTJY+lqFDmc9V5x/9ovkMsvUaUOkhYx6dX48Bvr/MEf/TMk0cCuamXdmb1HGPkEUZ9mq8JgOKPVukHiZex2b7CyXsWPE2YDn/PpU6bLBaYas3Qj9nbfpbMm8fu/92+4eeMtyDVmsxmtZpNuc5OT8YiL6Wvc5Yy5t2C9I1BRNkjzFuvrDU6OnxAEAYLqY5v7pQfcqDGbHZIJIo68y/a2RRgmxJnL4fEr7u5+nUZNRbfXaVgKzx495GJ8zuvh5+zv/ByW5XF+dEaztcpgckEcy6xt2AyHJ7z/zq8Q5nMePHzE1sYuktnn4PVZOXFKjqVVkZQFj764YG//OrLqcno85tbNfaIoRJUU/s7f+s//chnK2XCO7Ui0aibj0ZJwmbC1tcV4PKLZUVh4Ca2VFt7S5/qtLUaLOU61YDQIye0KXphjmg6DoYueaVQrGna9xWSqMR+eUW9LFFmBJm
vkREzGp1jaGsHSZT6d4lgtJAUiP8ZzfQwLnIpOHICsZbTbNvPFBMdWMAy4caPDT/xsh9/9jSWWscnF6TGVlRQxraJLApZlcfLqnJojYxsNLAfE3GAxn5BZKqLVYBl6KGKIogeIcwkDh+PDMZqqIiIymy7IQg9VM7A1BcMw6I1OsGwNWVZZ+HM0XaJa6CTAcDxAlQw8QeRivqTS7CIUKn13TrW6jr9YEEQukgSGrHHe62OsOFhVm1wqCLMFuqKzfX2D894AqbAIioypN6SIJDJi9ta38N2ExkqdjXadk4MBh4/HVJp1WDTZ3FnlkT/l5OwUOc95dTRgZbVGc73F8XkPhJy8SMkVj88/P6LTqNFqO/QmEWksUoQGUbBArCsEocXufhchFXl9OuH6jQbD3pKbd2ucHqgknoHeiDDMnIveGEkJaHeqpJFArVOwVWvy9IszOt0VEOaoqspKp0NrRcYwTI6ORwhiTr2VYKgVrFpMlibEwxzTMGi2LBYjSJKMyeQF81GLYX/MxnqNW7duc3o+Is8dwhBMVWE28nE2U3b3bZZeQuAFZFnB7XtdohAcx2NlrcGd+x/ghws+/vS71OoGwULjrNdHNVRCT8KN5vjxEknSWbgj2q0uUr0gjDNit4KtxWioWG2VYCHxW//4e0hKyPaWTTCN2Nm+jlnxsZoW3lIlEabc37jGfOkiKBqnR2OSpEBVVERJwqi10DWRMMxJ8wBR0hmPB+RCznA45NatG6XvdDam1+8jqwqNepV6q4EkFJxfTJFlHVWz0FSdLMsokAjiCE1T0XUTUZKuQFaeQxiGJfjKMjzPI0mSsiPyEkSWwZgSmPm+TxLFIOQURYYkiciiyNL1+LVf+38xmy1wFzNWOt0SoOU5qq6RpimKIuH7PkUmXgFaTVPwPQ9ZLnsp4zhC1TWCICAvSj9k6ZUsWdayiDohyQss06HZWiOngDDEcnTiJLwK02imga6XrIMsy0gIzOfzy3PL/tx5pWl6Ba4RS8+kIPzoupplGcLl/bgsl+n4arWKJIrouo6mKcRxjOu6pGlCURQkXoJh6Fi2RqNuY1kGIBKFTYIgIAkTkiTDC5dkWX4ZfBJQVRVdlxGFDE0VSdIFAGEYU63WKSQF1/Xprq+WwNSu4HkuJ8dnTCyLNM6Yzxfsbm2zud1mY6fL29KHfPbZF5AK/P3/8p8ynQ9YMWxWN7tkcodZ6FCrrmCtGIhGk63tBmLS4A//6HfY394qb+SSjI8+fBt3fsZv/osn1N5Z4fZKl4ZWZb7IibImw3mGG6bEkx7+3GMxD0gjWF2zIfI4PY3Zu9Zhrd3lyH1GmqdYdhXy0tcqijJiFpchLd0hJSif9FTE0myyMMOdRTybn1Jd04ijjIvRklZmsJybhPMl7a7LZJAyz3pc9M84PzrlLB4iGxpJnKEaFqGUIOel7YOiKPe84cr2kec5muTw13/5FwniJS+O/pDnXxb83P3bBE+HKKpEkaaQVRGECMe4y3e/8wyrHrK6ZTOcuGiyhigJPH76CWkS8PUf+2nOT6aMpwvcRUiRSZiyw96dG/x/fv2f8UHLIV4ugIjvf/GCtabD6koTf6Jx6/7XMYUDFt4Zg94AzTDBSXk9eEl9xWG6GOIvZWZnfd7b+imeH75GN23koornjln0E1baLRaDPjdubBHHLh1rD3VX5Hxwzm73Z3n+6l/Q0Dzudr5KRUu491d/kovDV9z5hZ/hnTt3+P3f1vBVnfnsJWkY8P7uLpmZcnryiN7Q5ue/+dc4638b8WjM+s4N/vTzh3z0wX2WyxFpXEFUDWI5Io48bq/v8PtHPyQIdsiSjA8/+IAvvvgCwxjw8ImPbjVo6gY3fuwuZ/2HaBj0+wPWV66jGbDWWkXQpjx/eYChZNh2jUR8RpHfQNbAX8DG5jqPet+iyGSmY58PPnyf8XjMtd1rDEenDAYelqIyma/hNAPCJKVa6SBpOokX8N47u4iizPe/+2+IfJ00lpjMTtjfvgNixMveAYIgge8jRR6OAe50hVRPWNv3OO/NmXsD4jhHy3WW4iGSqKCbDsVERNV0luEhj5+KXNv/AC+YYzotvv35v8IWb9FYOSeN5whZyunJBU7tBkenn7HWWaPWNPHCgHbXpFbZxXWH1OtdfuNf/TqbO9exrCqqZnByMSH067TWmjSaCp9/8jmrKzXaKzZROiQXMla7dYZDl3fevc7p8eu/KEz8iwPKze4qgZdD7vHuu/uMBkssSyDLLTSxxsHTIz788C1CN2B0NgIlxS72MFoV3nvvFj/4Ycjx+TOaTZtOawt3NuXV8RdEoUjDMQiDEEkW0C0L3w/xpgIwRZEtGhUTp6mzWCyxLIMPvnKbLz57iumAR4ogaNh1MMQtRj2fXv+c3Wsdnn1RHnt1rc/WfosUmckootpUcCenNBs5UQCR7yOjEsUTNM1h6C2J8wJbdMixOJges5d2qOoqwTJgGsyRpYKoENC0knmIvBBRV4mimFzMEYoYS9MgTKk7dRZBGXJwBA1fzskzgThO8cIISc4pximKriNoKYv5HLuxze7OdU4Wp2iWhjtziYIFu9tN2p1V4uU5pmJzcTHAklUyyUYSTZazBWurXeYTAcfSSGJYbV5neNJjfa1Cs71GffYxfhgQu212d9qM3SGD0xMGkylZZhN48ODZEzqVXYbzgAt3jKVrRFHA+toKXpCzvbfPYh7w5acv6Da6CGLK4HxKLgn0ThJ8sQ9eHWUe0OgGmKZDzAh/ucLw9ALJlBEkmU5dJU4nOFZBISSc9YbY9jZ+WE7zFYnE+eAZdWcDd9xGM3JkeYplqKiCxubqdXICbEvh8PCUrb0Od+7eYj6fEiUhumFj6hPOjpY4doX5JEDIJSynjlERYBqiylVG3iGKmVFEDqNRnxeHP0RWEwI/4PQ0YP/WBtcr+6T5lEEvZ317l8m0RxDVmU/7xFmOpoNQjBHSGnWrgyaIDIcHfPjuLvvbGwTJhIppcPHoAiO6SZb6fHC9Qtfp8o/+1Q+xah0Uw6ZSEZDkJVkhUOQ5gqxRb7VZLJcYikaSZ1QaNvOZhyQpuMspjWYTTTfprNWvQF+cZBQFqLJClJQS9ngyAsC2bVRFJ80z0iQlF8oEN5JIFEdIqoKEgqZpV2nuPC9lcUEQLgMpMYpeyuFZnKCrGsulj4CEKElsbDTRdJm19RWEAqIoKutfkkuQlWUkSY4syyhGmcZWFY28KNB0/QrcaYaIqkqousblJzx2YaMoyqV/sSDLqhiX16plmFCQURQ5eRKQ5xmGbeF53tU0ZVmKLpNmJRAOwxBZli/Zzvyyyki/AhRpml7J/m++BEFAN6wSWAoCjmNQUJ6n7y8JglIi73TaGIZBlMQUSUwYxkReznw8I4nHSJKEokpomoLuCNQqFapFeX0o+0ZLYJ+kEWkIi7kHiKRJjiArJPmSLA2p1Z0yKBUXnJ0PcewqkiyzXAbouoZTtQiz0ibx5OlLVFWm015ntd3h2rUNcjwWwxnj6Yz+aIK77JNmL7l/7x6SIGFabS56U1SjhiSkpQUmSPjux58wGE5o7
+7QtCweH8/5uf/xL/Cv/8m/Jk8TRDXm7p2v8NmnP2Dg97HtDmtbErWawuMvh2xvb0Oa8OJhn06niyZU6PeXVBwLJBGEBFkUiKOINNXIWJDlARQaui7S7ohocot242229hVev36FpTc4OTtgMY+YLB0OBlNWuqvEeY6XDtHaBY+PnzAfNzE0CwBJUhHzS4kbEPjRzUNRlGyxO/X4kz/4Y07PLtDMgnff2SQ7iSnIiaMUCol37t1HUidM51NUNUAQRGTBRsDj4OiCIFhSq9VxbIl//i/+kH//7/xtnrz4PfJ0lSQJ6Y0nyAdL3rp/g+EgZBzY1GyHneZ17r29wT/4r/4JX//mNT755PcpEpG9Gw5puEZntU0QnbGcvOBipqNWYvbXP6CedgnHLmvVNoeLM7IsQ5IqrG69ha6cM774krWdKo1Nm2qrRj3IEc5W8cIxS3dM90aT42d/gnP2OR9nOZXU5sOf+/dYuGNOzp7S2X6HOFcI0iXfe35ItbGO4ic4tRmfPP993r7xLsniNd/46nU2axqjl19giSlLM2foe1QrGp43J8pVGvUWXtCDXOfg4AHBMmM6KFjfrvDyxWveurvHweFT+sNzxqM5RVJmMUAkjkOWrk+rdp80EXj/K3f4/NG3eH38QxrNCpPhhK3NXcaD8ga5WoHRYEyzY3F0+BTbMTAcyClorCSc9aboSp3B7AFxbHJtZ5skFFAUmYrdplbNGU+GIBSEccir1y/Y7t5Ca6XMvIC63mB1fY/+6Jg8EJi9DMGGTqdDnLoIeRXZCDg7niMWp/jBEj112N3bZL6YMvMHHJ6ecfvOTTa31hn3ZjQ6Cp9/fsrdeyvk6iq729epN0yeP3tFs73J6ckYGbO8ZgtgaE2++nWNi4szHGeX8fIFUTAnDTUMzWTYG/Leu+/T75/QbOggaizcHqpssrbR4Ft//B1arfpfPqBcziPizKVumUiCRV5MyXKJd97+Sb7z/d9mddthtvRwZ2PaayGwgW7nFKnBJw8+xc9DaqtVVFXlqHeGqpgUmkmzVSVPUoQoAEFgOhtgah0MvcCplDLTzB2QzhbousjO1j7+IsQxHXb2WxwcDMlzkZVVG6nQ2dvb4dNPClQtp95UePsnqvzx735BhImmFkRByNbqW5wcD5DUjFrVYzxw2du9zdMXxxRKzsqqRRaLmHGVIHNx0ymBPyfKMwTVwLIV0iymU23gLRf47gxNsHEXU4RcQhFUQi8lzcCpVAmXWZkwT2PqVRtvnqIaZlmeq8pookHoR4xHU6qmzEq9wd7mNvPJAm+2ZG21iSasUO9WqdXAX0Z0V5tcHLnsrN9k6jWYeGOicI6m5NRqNU6HYxZeRLu7gesPseoKXgF/+L2PSYuI7Y1VUq3Ay4cM3ENMw6HWNZhPZeSwQuEvSKIlk1HK/s19VNnh5PUZcZqwtacT+UMuTsf86l/7ab7xtfv8J//J38ewDdqbDi+eTFBXF7TXTQrFBEFFUguIRUbjMzqdLqJoo0gjamvbKJbISe8luhWyItWYzyWidIkoKRh2kywyKXKHi8GYlS6sra/y6skRcRM2u0tWVx3cpcJP/MTXmE7nzBdLXh32EYSC0eIc27apVGucn42oVDPOejGN6g6yliNrKU69jTttMB27WGbBcPQYQU7YXL+B6w24dU8GMaW/eEq1XqOyYrAIZiw9ia31FtPRBMFU8ZYJQTBE9B086RQhLXj/3ZtlwKjikCYNamtbNPQDvv7eR0jzp3z70z/lHz8+5sb7P8Hm9h3mQYAhnmEag/JGJQ6Zjxe8CgJ0s0aeewhiQrPVot1ZYy/dQ9VEwrQMi+h6CRJlsZRdwzBGESV8v3w+Wx0HQRCI45giTZHF8kNTkMvKH4BWq0WSJFcdkmmaI0lcMZSCILBYLK4qfZIkgaJgMpuXoCsTyfMEX44RFZH19Q3aLZvJZEIch2iaxunpOZ7nIwoyQVAWpgtiXnYPZgWybJV1QYFHURRXknGa50iSTCGAHwaXBeY6gpqUx5wWGIZBlkFRiCiKRkGGtwyoVqvEcUxepOU1zfNR1VJqz9Liqu9Q0zQEQSAMwytA6ThlCO2NHP4GZLxhLdM0JbpMrwuCgGlZlwA8Yjqf0R+Wr6epm1QqDrKWUm3ql12hZTBqPncpUOhdTEv2L4rL0u+6w+paE0kuyFMNkRIAe8uAZZjgLnxkWWU29XCn/qWUXuCFC1RZpCgEZKWJZVWp1essPRfh0r0QJxlPn7/i9u3bmLpCUhzSshsI4zE7NypkSYJIzvlRj4MXB9RXdvjgG+9Qs2o8ffyERqdGrb7KrXsSx6fHDIeHeMsev/nr32LwbETX0ZicD9h6611W2l/n0x9+xnTik/gGgwCazSabWzpnp6eoWGyv3eGHT47QNIO8iBFFAUFUiGMBRWogigoCb0B/2WN6eFBuqju1jBevjyAPqbQU3u7e4vDojELMWF27wdKVmC6ec2v3mzx58oT37t/gi3jIZOJhKgpFmpPLIuLlTcObTtY3izmapnF+dM7TL0K6G3Uuzs/Rihq1EOIoRJYligwuzg6ZaiphnjHt5bTXWsznYw5fL6jWW7TXHAI3pWkYDOcf81v/+nep1nTi+AX+QmH7zl2enXyM58e01S6NRoNua5OHjz8hy8OyUN9TseycaBmissX+vsfDp59xNpixW1tFThZIYZv5ywJFF3l48QVpfoHDh6SKQCZM0Fc7rCqv6Q97mBOHhZQieh4HR0OipM37777Fy/4G/XmVxrrI4cEZilKlsOG3//S77FcLPv3WH6LeTmg1bORMRas42C2DW81dHr04YDZNiH2Xm7sGf/KvfxPVyfnhdz7h1t13CIsIyyiY9Ef0xgXbb91me3eVxWJGnqZEvsbZ0SF333qLyMvRDJWnr15gWysoagPBmJIVApVOwWJaMJz38JcF9+6+jyAUHB+f0u8NkVc7nJ17rK6uMRqN6KxU0fRyxc13M+ZDBTFbJ0l6tFYdlosxJ2cFnz/6NvfuvUO7s8V575DpdMrOjkMQzMkzhYV/xnKu8RM/8eM8fvEZS9/nbDDjdtdi0UtpdyROXg2pt1os4yO8VCB1U/K0gapVSROByXyOZbSwrSqkEo2qw/e/9ylr69e5dkfj5dmYV68e0LT2sA0FP1giawUPnw4xNZ1e/5zFrM3mbofjkx5IGZrhkOQTBsMlppagqAWC6HByNMK05yiFjsiI0SCnXusQRz61mkPgpywXOdf373F8fMxwOKRW1xiPvL98QCmqPmISEmYxg+kxyBGNTpPPH36CYeiMhxPShchmd426nZLnGuP5IZPpc5JIR9FE5KRC73hBnC1prypoUoXlNCGNQSJDVBPq9gpRmmBWdARFZLYYUamv4nouQqHz7Mkxhp6TxSoHL3qoukYUWRRFRparDKdzdm9UMOyE05M+3/vBK5Z+SOjJZV3GB9ewnTmb+wnHhx5e4HHn3j556mLXCtLUQ0pFFNnk8OCEWrWFmVss4ojOWpPlzKUICyRD5/xwBEWMbVepVaoEcUTDtBGE8kMtjmNso0JuSCz9Gf4y
5ii4oK21mblzxKpWgtL5knpDQdJUwvmYKPK4GJ6xmIZUnAbeMmaS98nYJRVFZvOId969hygMOT07YR4sCIIM3fTZ236Pi4slvdkxuSKxuHDZv97hZf+Ujt5G0RYs3Ixvff8JextVbu+9z9Sf8+rllJt3W8xmIzTNRogM5smYlfVbqGKN737rY8yKRJZbbG9bHD3q81Nf+5Bv/szb/IP/5h9y736bxkqVVy9f8/43bqBpb/HFs4dsr5c72OdnHraqce+tTTx3QTguEIWcKJ/gXmQY1QDfjTBrCokfMZ8lVBsy/VEPVd1iscgQlZxBL0XN6lQrBYriMxpPmU4SLgZjvMCnVl3n2fNjkrSgs9ImzpaIYoXAM6k3BbzlgFu3bjGdLPB9kbrZ5PGTF7Q6HcJpwqg/Q5AFFD3h/OIF7eY+QlEQZwGGrBOMcwxFRjMd2lqd3vARqmQhFBKThYdjt1nvrpOmHtWKxGTis9qp0xsHqEqEnnUwnCr/5nsfMz58jty8xu2f/hm2tqvEkYApibRbTSRRhKKUvR27Sq1mosgmWZ6SFSFZVhbZzmYLFLWgEBVkWcT3fcRLeVYUZcIwxM9zhFxAVSWky1CJqamXgDOkYVlMp1MsqwRxYRxhWCZL1y/9fHoJkizLYjKZUJBdreV4gY+iKBQC2FUb2zAv64lilr5HGIaEQcr5iYRTsTEMDVmyWOtuE8cxk9mCIBozd12SJCGISqZUSJMfXaRkmTQXkCQVipSlF5HkJdiVVZ0gShCKFFGRyfKEhfujTk0AifLYdb0EcEtvgWEYVKtVQs8vk+uSWi7z/BkWtpS7f9St+cbb+OYrSZKyr1JVURTlkvmRrjo0i6K8DjiOcwXGfd8nClNcNyLPwLINNC3DsMoglSDmNJoWgqCwdAPyTMRfelycLymKgjAaYxplh69hmJiOSKtZRZZF8jQhTJcUuYI7j8vgk+chigIXFxdYlst4XDYK1GqVkvnMQyq1CqPZmMloTJIG5BnU62soikIULdA1hf27b0GWk7MkzwTaq1Xs6n1ev3yFKPv0zydI4hmGusKNukGyOGZlv8bezbs09ILv/eG36A1DghR0zeL0+JwPv3ab509GnLxyODodkAdVhoNvEwg1zHaXOIspipw0chFFGUEQKTIIvSXi5aKNXbHorMnIUsJgdsrW1haKIlOIC16+PqFIy+3pyTij1pCoz+s8evB9umsNFEnEnXvoig55gCiqxFlGaQgp13KAsgeLso91Ok/w44RlNMBdGAjygPzMI4kBMgQEnj59xbe9c3SrTS6HjJ8FrG2usoy+QEwWRCcm0+kMzdzDsW9wdjFiPF/SbKwSajM++eLbLBc9ElXE2XAoopz7N27z4Nkn9CfPyeU509E7NJoWjw4/Rpe22b9dZTpccn7cY1V+m/e/eZu//1/9Q37lF1pERkw29anneyyCKVVhlXtbe1wMXpGcP2DF6PD1b36D/+J3vqDThoPHY1Z2dD578hTHuo7SUYnEhOR8AonNeWqhC2P+1e//W/zEZstOMSo6upezLlUJsjp+Ds9ePadTv86ffutPaDoafiwwjX32fvyXeXL8gGg05t6dO3ihw2pznRt7Kzx7dsJoOGU5T/nG1/4K/vWQ2aLH2sYqorpOmhf0zmbce2uTKBtTrRoYtkLvNOba9bucH/eI81dowk2u773HweunVJ016rdXGU4OuOj1mIyX5fu/EEDIefDkYyr2Gnc2d/j82XcQcpt2U2Rra4+Ff8HR8SmdWhNRhNFkzDI4ZuEvqThNNFXn8PQF7sKju7ZJlPsU83XuXHuLcTzn9ls7TM5C9le3ePT8KQ1HpHc6RFEURFFGV1epWI2yhSHPmM4mbG1sY7WOOT0Z07a3yUKZ1eoOteaS3/idf8za+luI6pAij/CDOY26zXxu4iUu/izDdkoM5fs+zXaNpTtjPnfZ377OfKJjqw6bNyNevzpmPDrHcMB1l8hiFcduMBpJSHKBLKmsr3WIu+n/HwClFjGdeKxVa1jVnOkk4uTsOZZZxZsLqEaV7f0qmxsr9I5iXh9/imE0UQ3orm1wen6C68Us3Ig4D8kGU2azGZWahuGY1KjQ6jYYTX1m/QcEocqNG3uEUZXD40Na9VUk1cdfFFi6hah7xLFCrdZlPu0RBU1OTo65dWcTRVVYzGLu3nyHzY0bHPe+YOFNMFSbjb2Us5OUk5MxtbYFWY3z0zHJNMHZyKhUWpy9cJH0mOpKhmkmKEUVYpFFFKDKKuPxGCUVycIC27ZQFZ0480HICIOAmlMhSTJ0WcZxLMI4Y7ZICeOCXMlg3kcxVSazOWGcoSCT5SmK0cDUVRKWuJFHJonEWYobFMjGkidPP+ed++9g2zYff/cRd263kdWIvc1VXj7qs71znRevTumfj9l/a4M4XCIoAS9e9/HCJZu31pgMfFRJIF3kONo+5xdjLs77CJnJqvMWJ/F3CIIlsadw+537TOOQ3uiC3esSpr2GUJgcHT+m097l5fMz/vhb36MQJVa3GyzTOXu3OiTFHDHUMQ0Bf9Fn0mvTWNXZ6OqMZ0Peeusei/MQL8wIs4yVboWzuYtl1PDccodb1QzmC59CkPFmLpJUruXIospZ/xV5lGM7ErIyploRqNYajKdHxImFogpUGiqz2QjDtgiCGUYlJk1ybEVBtQWKRYFdFwnSC1TVZDweIxYCnW6Dx0++ZHW1zWTmEixPuXfvBs+e9zh5OePm3hrZXGQW9omK11TqqyimzvlJn+3tJp1Oi/kiwl0UBMGSZn2NJAtprq5h1QxMu8pabQN3KyH9sTsIkkI+ixmOFmV6WhLKPAAFgigg5AKSqqJIAmHkXgIWEVXVyTXQDZU8T8kF8XLCML4ENBGCIFCrVa7+1LMsu/I/vgFGQRDgui6tVqdcnwnKsvEsja/YtzfeyuFweLW//ebn37B5y4VbAqbIR1IExFQsF29klfFkge+5KIqEJAsEgXcF7sKwZPx0U0URBfK8IC8Kokt28M3xvvlekiRkTcNQDHzfR5IFVM1ARsf3fRzbIU3TEoRGIZIgkmcZcZSSpmnph0PCWwZEYYKulrVHsqmWHs28ZEnfnPsbxjFJyg7KP+uhfMOclkAUQLysXioIwxhRFVkGAaPpFE3TrthI0zSxJb0cgBBTLoZjdNcsvVeUCXoASZLoNBuYHR1VK72mcRYTBzFCLnDRmyLKKkHgkUQe1YpFpVlOQ5bduzmqVlx1cIZhjDspU79HRyeYRrV8fpULas0akqiQxR6iKBNFfURBRtVkwjAhyxakaYxQSJz2Jnz740/RVIc8EXn3nRUkXeLz77nc++Aulqnj9h063Rbnwx5aZRPVbnDn2ho//82vM3V7vHp9QZwk/PV/5zavXlzQWfkGViXF1Gy+/dk5oRAhCSJZrKKpKmmakyYpll2QZS4gQm7S6TRY24o5fj3AVnao19qcnr9ivDjFMbdLe4AYoMk12rUNPvuTl9x//yPiwmc8nRInHuQ6GTIRARIqgiCSU/AmgvXG6pDnOZmWIRkKk5mIVvG46MdYoY8sV65Y7cE
8prK+xcuXz2muWUhyjW996wVb11oImYq7WFJkOU++PEUUNDqrCae9Pi+fR2zcThmcj1GKCqKWMV4s0OQGzw6OqGot5q5PpdLg+YuX3BR2ubn3Tbywz/FBwe72HdYqq0i1hKPzE1Z3DL5z8M8psoIN4xbblS0e+T/g+NkZK84qtqPhJR+xu1fhN/7Ypd3eIlcDxEaCIZsUy4B+kBOmCw4ePmantcPGtdtkfYlb7+zRdzPe33gLy/F5+sND4obK5PlrpMRnsWjjRzkTt2TbLatgkSdY0iqVik63uUutadAfXeC0Wki2yCcfv8IPfarOGo06XAzP8TyBdmuXLA+ZzhcspiH1WhOBhA/e/kVevjggWCxp1E1Cb0m91qbVkPj428+xLIOtrT1OTh6jqA7Vuk6Rq1QrbU7OnqJrDlFU8PO/8Issl0uS0KHT2kZSDJKgBFF5usLPffPniIOHPHnyjJwmo3GAU5WoNaokkYJpCTSab/PwwTPW1+sczF+jWlW6q1uMXo8Y98eY5grRNObmOx8hCN/n4efP6KzssL16jbyIkQSBLE+gEDB0Cc/Tyvoyt8L13T3qtZTQz7h//edJyfEShRQPUZaZzKa8ePWY9z/8Kp7Sp9e7wKkKGE7BeOqCWLYgaJpOq9LGWwSMzhfsb75LwDnPDr6kUq0jCwbdbpvFFNor65ycvoDUob1i/eUDStPocP16kzQXePHigorlQC6w0mhjdCLcqUfd3KZ/3mMZzLEMm2rDJ441eqcHHL4coCk6tlEg0UJWLG6/v810dk6zYbLVvsXSj+mNj/C8FNPQCH0f05TZ2KjQbtQZDAY0WrB/rcPnn35BtbLK9k6LwHc5P+5z/XYXTQd/adFuWTx79qB8g8QrVE0RTY05eD4jSiUqzjpnRwN291RqFZEglpFEhTiLEOTyqVlf6/Dq9QmS1sJQEwxVRi4ULMem0azjLTyGiyGCKKKKDppqkIfxpXQm4zhVxuMhQZTheQGO3cBLYxbplI5dwQxgsphTtatImcDSdwnmEwolodtpsIwWyJqAJCq4IwcxjxgP5ii6wGLu0TuXeProgvo4JwzGPH3mUjEtbt7dQFY1ijzBki2Go4w7azf53g8e0uyaNOWc3VWZ8+EjLGeTWqWL0ZB5cdSnYteZjqfs31jj5csnyFqBpdeJkkYZFvDOiUONSE1ZuDNaa2v0R+ekUkQUVpjPxuh6A9d7iiDljJcBmmlydhyTRDmSnPHpD44RxQmb2x2KRfnH7QZL6vU6sqwQJwVIBZpqEwY5Wb6kQMTQuwTLKf3TM1r1dbwwwlI0njw7Yf96BdPYQlNtcjwqVYNmq8L54AhNq6BrCl4xIQzmHB9rTCcLNjY2MJUOcTpmPJ/TrDUwdQMKEYEOgSvQ3enyxZeP6Da6/Pzf/Wu44z5FYiBKPsNpn1xQmLshaxt1cgQQBRTFYf96gzgKWF/vsrrSoep0QEoQRYFZWpDlAf4oInbnxGmCrpvUmiZxktHINRRVAQQKtWDv2gZqTUdWFBaLJVGYEyWlpy5JU/JcJb0MEEhSGQiTZZVCKBPOaZoS+hGmaZJmGWmekfiln1BVy8nD5dIvAzGqynQ2u5Jh34CpN75JXdfL7ke11EyzLCuZeKeUk2VRLL1kokSz1SD0QqIootEqvZ1ZlmBaFopaAiS7YiGKInFarugYqkyWFUiigaZpV7K7JBVkWUGcpZc+RhlRVC9ZXB1J1FCyAkWRr8CgWJQ+xyQrl7uCoJS935yTLJTgst3sEF+eoyAIV+etqupVOKdc3fnRlOQboCEIwmUNUo6q6peF72rpQZXLx6s4NWS5PK8kTplFy9I3qWgoskKlmiEIYBgqcSRdrQulacpkPiurjdIU3VDRNRtVUrBrGvV2hU5n9eo4z45PGI6nhGFEll+GnPIM2ypASLh1exfyBISUWq3CRW/CxUW/DF1FEV4QEcYpipQiK1BkZcJflmUW8wnVmoMgS4S+hGE3EASZNM34/qefk6cZ3a2bzHrPOZcsJrMhg6WHgMxodIBmdRj0h/zX/+/fwXJErt9cZ7qI+N3f/YRGtYZt6bTNVtmDGUJhCCRhgqboJLkHFKiaSJ7kRCFoskWcJowGY/IkRxXaREXGlw9f4NgN5tNDZjxGzqucHngoist08ArZlulPnhMmMyyzQaPV5HCeI4o5qqyTpyAiXAVyiqIgKwokQBQhiVLSpGShqw3Q1S7XJQX1ZEieF6i6xHB2Sr5xg+v3NlksckRpQaUakAdrOJbIzO+hyDoVy6HdKcNYDWedSTrAO5Mhr5IbNSqqyNPPDtjZKfj1p/+IPNH5+kfvcn52yupGHeQhjn2D1EiJ4yXd7j65WuUHL7/FcDgkXmq889Ff47MffEra2WYsBmytfQVTe8Xx5HN+9vYv8CR/zPcfFdS2TFIvY73jMD2eMxMilm6f2XSE1rnH2sYdZpOUeljw7OQ533nwkL/9d36e4cURD5+fceujXcJJykkuY9c14nmOU9d59uK7bG++yySAwWjM/es7qELExeFL0mKVzuoKzx68YqVWK9mxqUB/cMH2+nvs36ijOxoPPz1ma19hMctQFZ0bN/c5PnlJkU6o1Rp8609+SLtVsvs5BQvX4md+7iMmoxmuN6NSqxGGBYu5wI3r28TZkodPZ5hGhyyf0B8uWFtb4cEXB3TW2ghSDRKBXu8zdK1F297k6cFr7lzf5HsPnpCnJp1Vg2Fvxv7ufabzM1zvmM3NJrJiIQRzpMBn+eoZO7WMb/ziNr/+mw9Z7V7j9OQZ7UaL3W0BQZDRzQKn4vD0wZilG7G9UydOBDS5QyF4tLY32dp36J8v8MOI1W2b4fScRU/mxp0bnJ+4nPafkucap0fH+J6LZYuIYhVvGREXp6RpjpI1II9wTB1BFbHVOo8ef8bdd29z6/qHLL0pSZRwfnaBNzdZXSuHTmRmLOc/GnP4SwOUYeQhSimKWkFTCpJsytbGDrIcI4s6o2TMxXCCopgo5pxskSMrJmEYIsgRm3vbRMuA1dUajeoehmVy3jvAGxmkywg16xHmYyRBoN2s06zt41RFNCNnvvQR8gwpryCIE4ajC+7cfQddqbC3t8fzpyesbxpM+hn94pxWR2c+A13ROTvo0263KeQWC/cZSagSZVOc+gLbDxmex6w2a2QrAqEv4U9m1OsSelvk7NRj0lPZ3IoxHIVJf8ZW+yaFolCkCaap0q1vsDhfsEh8cgSSVMSPU4q0QBB9nIpJkSY4hslsEWDZBlnVQrdVaprFbNQjISV0CyRBoWJbeLHPdOximyrLzEOVdRTLQjIkjgcHtDs7eLnHi8MLclXgbHRKQ7fo9QZ4tkFoB0TxgmubNygSmyR+Rc/t8dbbbU4PfQJRQdFtRuMzjnrP2Flrs7LRoHc84sfe2+MH3444GX9Jfb1DEed4oYhhGihqRJUGs+FrpPUFDXMVVTJZeimt2gqhGzELZyS+h1pvEnh9gkJAkjMcR2KySNhe3yCKXbxsSSddwQ+HoNRgJiBkNZazEYKso4odKg2dpVeGSCS1SiYEeFGfzb0VFA
yiSCaMJFJxRhg2WF9vI8kZ3/3ea1zPpN6sU6vVMC2JJ58f0mne4PYHb/Hy5VM2dxxW21UkmpwNelgm1Bs2z54c026tlezQqsXZ8AFVq40mGNTqFsOxQCSGdLsd1hsdPvvkO7RrW5i1fabTCbZt02w5DC+mVKttZtOA6fCMentBlkpoQkSuKliKQiooiLKIZtVQ0hRByLArDqk7oqBAFEASRZpVjdq92wShi+/V8byc+dzF9RdXBbpJHCIIEllaEMU+kqSUbF+WEMcxpm4hiBJpFKFpGkuvBCm1arVMYbtu+e9arQSeaXrFqP3ZPkr5clIxL9JSdr9k/SgAROIkR1NMRFFgOp3jeS6Vio2mGWRxQlGoV9cUQ5fIsgzDMBASjywtSNIEWRSRpIIwXJYgLk3JBZkoji59m9rl74U0LWuPJClGVgQWyynqJSjN0wxLtzBtC43SH/kmla7JpfQfRxHCJTh8E8Ypa5VCoJS8TdPEMk2iKPpzoRxVVfE870oa9zwP0zRZLn0UXUHOZdI8wzBKhq2UbbNL6XuJpml4Xoaum4BEFMhkeUAQJLRaLQDm82XpJ8wDslRgHk7RFRV3sSBNcp4+OUdUZOpVh1qtUi4d5QVLLwAkdM0kS2Ey9jh8/f2SrTRE1lZXUBSFnZ0dmk0TwyiXqERB57x3ymg4YdCfEgZF6bdFYjSdUbUcJLEgD2UqVZm9rSbLpUWj3iYIPHJ2qAsaG+01bMMizFOWgYcmalQzGz8ob2J++MNj8iLh3t23yfOUpTtmNlVQNANRMEgvPYtJlIAEimKQJgWFuEQQCuIkQJIEFFlAl0wqRpPrN2/w4Aff48X8CZJWYRoekykDpHyPKNG5e00hOh0h5Ta7q22QfQ6kMXEhYkkSqZeDJpIVOUJRIFxaJvK8DI6pqkYyX1BTt/nZX9rnn/7j36TW8ihSDUGg7NFNCtr1Xc7TpLxxC5cIKGytW8Thgsgv+Mq7dxgN5xiaiTs/ZzhyyXKVRmMN01IQxkNEcYgsNEFpI0s2WSoBIl989pThcMjN6zIzAyo3Zd5//yO++8Pf49XBkP2NPT546x0+f/KA6VRgMrzgV//q/5Ab9yX+9be+y/DgANk0ESX44sEDTFuiXteYD4/5wZ/O+bt/71cYDh6yVtFxXZeV9ZsURcZsvuTGrXXOz3rc6lzD2+nz8Z98hw/u3OfWL7/P9z/+PbKkwQf3PqQ/eY6fxnz9x3+OTqOOo3YYDHvsrHaoVTQODl3uf3Cfz7845P67H/DJF9/jdNBh18w5OjgnK2KevvqU7Ws/x3gy4+s/s83rl302t6ukUZU0yykKC8VcMJ+7OHUFLyoXye7cuUd/cIzvKWzurDBbTkmTgu3dLQ6OXzGfl4SHZdZZhlPMioamVBlNz5nOx3Tam3RWmnzxg0cEfsHP//xt/rP/83/E/+gX/y+011/xu996yY//+FscHH+CzAruYoxtaQyHPUgV1tcttjjl/Okrfuyn73PrG0t+418+oqrcJU4vmPULGmGH7uoqS3+K687o9yM6nQ6L2Zw8txgvnuPYDfwlaLrLdKZyeDpCN1Qm3pjz3jmWscp07CPoE9I0ZXOryXBwTrO+juNozOYX5U2zUWAYDramkYYpTq3BxfExOxub7CgyT54/pt7osraxQhLCRX/E/o02i8US3/dZ6bTQrNlfGFD+hXsoP/jZm4UmTPERaDWa6BKg5DScDZbeBc1GG0XLOX69pCAmiCZYlk2t2uH1ixGba02iKCCMMkQV3MBHFjPWO5tcu7bBwycv2VqpkOVrPO89ZmPV4uJ4hmDl3H/rDi+fL5CSGQ+OX7DVENlu36C3jFmp1kjjhFiNWM4lVNWgXjWZzWZEoU+cuKiqjunU0LWCwIO+O2DkXxAELnZeZb1hcdD32N26yY3dfc56r8jVHtVKnf6rClHymvNwjpJp3Fzb5fj1mKJI6IcHJHaFalbF649pdqsIosxoWt45BW7IxsYGCBZzd0EYhkTJjHp1DbNScHYyR9YkwniMqrgYlX1iV8IxcqIiQzIr+OGYNJMIllMETUJHI1rMKAwTN0gw1II4C+lUqtTtDQ7PjpDEENuuMQlCwjRip7aCYRX0R6dYlRYSEbamcnQypdK22eg2CP0mvh8ShDPixKXT7BJ4IUkYsL2xz3A+xI9UUGOG5yP213ZYLoZUnAZL3yOKZ7QabeZuzMgfIcoiqqQgiwmCoqNqBS1plcPnR+ze2+D45IKmUwLogTvBkBxs22Y4HJImEoJcIGhjjIrG1vq7vH56wDwOCJKAdtUgXZiomkEsnKAJG8hKRt02UIoaXjpEtQqKJKJWt3GDC+7feAsvELDrVR49+JT7d+/x8ukRng+mUadIXfz5iNbaDY4nE0Qh4vDgKU6jxkrrJtvNLeaTc5azCR99+DWm0yknvXM6K+u02hv0Z4cYhoEsmDh6GwqRIFzS6/Vw7BqiqDIaTiky0DQFWZVIiwxV10mygjwqME0dSRBYH7r86u98DwQQBJH/9iffpdepIikiSVL6B4uilKJlZPxgiWaZV4GRKIpwHOcKAFUqlfLGTihXjeIwKi82skwQBCiKciVhZ1mGZpRAqQRDRgkuJZkgKmXoOI7JkvQKhAGol7vgaVr2JcqqgqHpZdhGEAij5RUjWhTFlS8xy/NS6smFqw5MVf7zTGBRFFc1QYryo6T1m8d+43N8wxTmeVqu64SlH1MRJaK0DO1ADnkpiQtIqKpadltG0VWXpCBLiAVXQFqRZLIiJUkSKo5zdVx5npdg9hJ0e2EAiGhaefxvWE1RFK8WibI4uTz2SwB7yai+OV8hL0q7jFVWOcVxjGmaVByHxWJBliVXifOytichixMksey9LBCRJIG8SJGEUoLP8xzHsUoWVNNYzF3SFIpcwvNKv2uWpVSrDo16lc2tDp1ViyCYU6vViJOCOMqYz1165yPCIKV/MSGOyjnNggzLMpCkkpEtE+sqlq2hqjLa5SqaKMhIokiWxXh+uaC0urLJcukzmYwoxJTYFzkbZqRyQSbmZGGOJOYUuUBOgZAHnLx6AImAZav8b/7jf5+jg1c4Sg058al1ZHoXS/7gD7/k/r33+J3f/Wesrq+w9DOCJKbTMqk1ZCJPwjJ1BrOQg36IYSoUoUYqR2WgLStvEuI8JvI9yAt0WSPyBD782g7ucsLmjoNiJDSOUv6jL8t50bzI+c2/ucGnxZzeaVkAv7XdJc3LKdiW08aowXwmIhZLjp8nbGysoVoRw9mcNBKJYg/TNDD1FpIY4ocpk8kEVZNptzexjJzRRUqraxEuM/57f+Nv8O3v/Q6ZqGOJDrpVcD56QRwZBJHL13/sJ1kGz3CsFY5Ol8TZgIKM+UDkva/cJY6WiFJG4Odl6XueI+khg0HI/o1NHn1+ympH5+5bTU5fL1nrbrK55fDll1/izxvcfecemTDh449f8s5P3uLkxSta1Q6vDo/54Kv3UNIKjx5+zty7wDHWKPIehrTDnTtf4dNHn7G2vcLB4acspwL7OzfZ3OoyGo2o1qvIqkEUZjSbTZL0jP6Byva+z
unJAWHksrLW5MWTGd31CstFwObuHmdnZ6hCBb0Sc3LmEkQu3c4O7aaMOw358Z/6Br/12/+EMFqiKBrtxh6VesHzpycEfo5uKNiGyXj8AjFX+M53vsff+zv/a+7d/og/+M4/p1JZ8ujhl6xt3GC+VGm0bT75+Et+6a/8HL40YfTkmFsNl7E/5xs/UePjPxrh1T/E7sDZ2ZDEr/DOu3fIOOEH37mg1tTZ2NrEXY6IAonprFw30g2J+XyKobWwKhmTYUGShOi6j6F2cdo65xfHZJFOo67xxYOnrKzVSd093vrA4uzkNf3BCe3GdWrVFotlRBotsK0mpglFvsRdRGiGjWFbJDFYWsYsnrIMJoz7IZWqxnwR8X/93//xX24P5dId0ei0SNIJi9mSCBGzLhDrYyQknr94wtrqNZJsQSEmZKnJRb/PaFxQqdcYBGP2NhqkS4mLs4hOd0mYu7jxS+JEw9IUXh0eIGsXbG9L6AoEsU8SKvSPbUwrYH4x4cb+Jv5kzoujZzQ6m2hFwnQ+ZpSMqNVqCEWLo6MDdFNgMYvodptUnQ6vjgfEl/uvy3SKqjqkQkYcJhSZialI9E6OmPRGVOqAHOJ7PVbXG2jK24w++zaWlVGIMZKlk+cpqlh+mMeJiLXiUO9UmY9DVut1ZFGm0ukwmJ5iVjr0Ly6oNco3ieufEBcatZaAH+QsRj71bQXkJcPTPs1r11m/ts93PvsBll7gWHXmk5i1xgZxlBObCePRiLbTQcozKlUHGZuxf45mZNham9FshqgKVHWZIJ5jGQ7LoCBJF5iCRGWzgulINIwavYsZEh4UEu5yhiYrTMYzHMtBMSV6F2dIioSQjJGUhHZHZegd0+g0ifMUScsxPQsvnDJcnKNX60ynEd22AyJM5xFOLCJWdbZ22lScNkJxgaapuPMYTatz/GqEbJ7T3aiTxBpFZmI5dYrc4+zVBaIhI2SgFCZ+DChzdFXi4iBk72ZpKo4jF1MJiZigZjKWapOFMlLRYjQWuJi8hjOdNDAZj30Mu4mlW6zUmpwPjnClhOOjU4IkQzQEHGMNaakTGiOOXI9Wt4Mo1vhHf/JHGJaJmKa889EHfPyn36G1IVJXb9Nt7iGIKb3zU2yzxQfvfxVNkxmM+iDEmFoVbxmWMupySZgEKIqGICSEfkohSAzHE/Iip1TdMsJ0TpIZzH3/CvS9AWepkKJbOlzOI75h1MIwpFKpkCTJJWAo+yQVpexLNS29lGOr9p8rLI+iCFGSrqRt3/fLyiHTQhJEwrCcWNQt6yp5bRgGURjiBwGqql6B0DfF3Z4XlKxhmhKkUVnQrWlIooiiQJrJRJeAUZblsuYoihCE8t8IAqquXXkp31QFKZcAMAhj0iS++r2ybBHH8eVEIwRxdCXhF0WBaljEQYgglFaUN6BPEARERb5Kbydh+XOarpMk5ffZJYCGN7VBRin7ZyWIVRSNNI0pioJGo4HneVcAWFEUsji5AppvzkcUxavQjqqJJetZRMRRSrPZZLFYMhx75GnJZhZFcSXbR1GEkBfIsnJZgp5gmDaybKKrOmlappSXyyVB4BG4Cc1WFUFMsasaa5sNLMsiu9yrvugN+OyzL4jCnMnYw7IsVFWivWKysdVmZbXB+voKmi4zHA4ZjxYMBwsG/SmuG5LlMVkKgqDQbq0AApLiX4JOjTwtWHpznIqBIms8ePCYJC6wbZOsyPDjEEE2yYhI0wxBLF9/xPLGIfY8yETyTEAQDD7++DGapvH45CWZO0M2VHRD5pt/9Wusrzf56tf+Fxy9PuLLBy+4efMOop7w+ugFD14f07x5p/y7kUXidImqgCKZpHGEKstkeUyepIiifGkViImFgqBY4rQKJE1i7o7oKDqQoSgygiAzn06YxDG1iolddYhTj5evehSFwEpd5ORFRpoGmLbCnXctJqMxi0FBwgDb3mLVqjIeLdFU0DSLKFzSaufoeotOq4MsT8higUYHPnn5nIcPH5NmBl4w4XD0DKcqs762R7O+xVn/Jd///HskechKt3y873zrC+rVOpba4uOPn7PRXaOxkjGZDQh8nWp7zuCsyu7uBhenMxaLc+7c/Bqjc4X5NMddPGW5uMlHH/403//kW5ydvybJXVbXHLQ0xhANZAS0wufjP3pGta7TaNQJ/AmS0mBzfYff/ud/SLW6iyhILJc+oaejaQLj2SFhGLKYRzSWIY0VjePXPjd+ucvnn5/x4nDAtVs/z+npOZ32GoEnoNsSk3EZRqw4myzsJf3zC9KpglNr0GzrzMZnVJ1NzvrH/Lf/za9z89Ye0/k5aWQgywqz6Yzu2gqhP2c2FtFUmTxZ5cOP7qFXYgazUwrnId/59vf56KOv8c57f51UnCEMdMRgwk/d3eH+6ojY3uO3DlxO8gZuNuT//g9mbG7vMHFPqC6ugXiNIvVoNKv86Z98SWeli24W9M4H1OoWVtPANBpMp1M8/xDPd8kSnSxp0+4UeJ4EmYEgxqhKE1msoagxk9Ec27YR8wo3b9sImYSpddneaLHW3eDl4fcgN8gKkQyPhw/7qGrM9Rt7FIWBLBkkgkDIMQcHA/b2VpDrPu3WFiNl8heFiX9xhvKn/t31whQbnAwWGFaGXMTkRZVWJ0PMK4x6E4oiI04ETNMEUaLWqNFs1uldHNPYaRAO5hixRqvRZBZPGPvnpLHH9e09yBTcKCYOBfxlj+39JotZWXFyfHZIIafc7L4Hho+pKhy8PqFRrdFUwS8qvDx/SRKp2IaJqi/wlilioaJpAZq0xnCZEMYTciFFNURkqc5sMscxTAxdotVeZWN1hccPXmJWY7wgJ04jEuY06m1so8JgMGA8mBImIqKgE+Zz4sJnRd9EraoMh0M6LZv5IEdXVMxqzsyLCdIY/IJcSKlUO2RFRlYsyVON6XSIIpuIFZEsndNQGyiKgy+luK5Lu+KQxCIePlIuQl5BUBbYikruqQTFAlSVyXDBPFpSMxpsrqzz9Ohh2bWHhu3oEPmEWYaoiMiJTL1uYNU15m5B6M3xfRenZpImoCoWmqIzn8xp1JsEvkeYF8hyhKWJTCcesqFgGlVMpYmmZZwNzjFUCz9akko5cRpTtQ0kUcf1UrrNFoVbUGtkFHKN8eICIQnxlikLv2B9c4MkDcqd0qpDxW6T5zAan5cycA5pHLD0cgpJJg17rFRWkaUmseoynI6paQ6bnesMpkc06l0GZz1ajSozd4Gf+ei6QpH6tBorzGYhjXYDTZEYHA2RahrzcEERp8h5jUqjwnB8QNVqsHB9LFGj1mhxeHYEEsiaShzEtKt12tUqrbUGYRhiqAaG6rC/v0uSxgzOF1ycBci6gue51Ot1VEViOh2zsbbJqD9i2B8jSSYpMZ99+Rl/6977/A/++DGCKEAB/4+vX+e5Vfr13gC9MnXtEPplkERR9avt7TesYZYVl0Xj0hWr9cYbKCtiyezH8aU3sLgsFS8XaWy77HkMgjeAqAz9vOlxrFarV5UqSZKQXjJsb8BuUZRSaVn4DaoqIwjSFTP6Bkj5/vISSMlX7KYoiohIpJfJ6jchojSL
rxZ63lQfaapR1rpQViX9WabxTXdgkiRIkoKqypfsaHC1TR7H6RXT+Ob838j6mqahyQpBEJAWb+TwHzGnpQz6o3R4kqWQv2FPfwT4NM24Sn0Lb3oOiwJBlq6qiizLIgxDFFUvXx+13DnXNO3qMaMwJE3jshoqTalUKjiOQ56nBEsPz/MIA4Ew9KjWLBRFola16ay0aLWr+P4SzwuYjOeMhnO8ZUKRi4RRgKqK1OoVKrZBrWaj6TJpnLBcenjLiMlkjrcMGI1mJHGGbqh0u232rnVxKuaVv1RRFKaTOYPBGG8Z4y48wjAmTku2XJVsVE3CNMvnrdlsl8X2RcbO3jafPn3G0leIiwRZUiEXSLOYIpdQNJlwesHpqyfIgkaz3eDr37iHH3rs79+gbuuMJiGCmCHJOTXb4vyoT9Vy6KzUeP7iCWnsU4gFWaJj2Arf+u4XnAxyjIpKGiaIkoJADlmOIECchORpglAUlzdUMdu7De6902U0mlBrCHQvQv7nX+RQSOS5wH/+tk5/RUeQYgpiJsMY22kwW1yQRAnN+ga1hkoYB+TiElmpk6chzXqNs94ZO5vvsL5eJ4o9Xr04p9aEp49nrHabLLzX1MwtKnWT8zOX9ZV9losLksIjyCNa7RVUWcFdhEzGHpKsYrcynr8+YHVjBy1dcHBwwLt3PoJcYm11ndnIo70qcfRyRihE5MKEJGpTb+qEUUqv/xpFtgiiiK9+eI+j133e/8o9iiyh013l+PQJYRQRRQYfvLfJ937wEMswSb2CIFpQ664y6rnsbbU56A/wpgn3b93B1m5wNvmCs9FTbH2f3RsaB89OCL2clZUukpzh+kM6K02iQGE4fM2773yEtwigkOj1+jQ6DqqecHww49qNdeJszvGLDKuSYNgZF+cJq509BqPHLBYzbt66SxAukUSVIJpRZDr3797j4YPnNBs67tyjVb9GlEyxaz6mss/F6DFZ6vCrf/N9/sk//Ge4E4edvV26Wxq9sz5763v89Ic/Thj8Jgcv+hS1f48Xr5/w+MW3cIx1pCKnP51SEUMq3W001UBSQhy7wWR+QL8XUCBQqddQhLLmrFKxePr0BSudLmEywLG6bG1tMZocshxCZ9Xh6OIQWRNRpJz+2RKntk6r5qDqMY8fvaLWFAn8FENdoZDPmEyXVKpdlosMQzWRlDEnR1NWO3eprwg8f3GE4xhUKg1UQULIZyiSShD5/C//p//kL5ehbK6a1OQax6MhmtqiYiqgSLiLIVHQQxRVkjjl1t1NsrTBZHFCHCccHfeRNYvxcMT8fMF6ZZWT8wOCVKTQYnRTJYptWk2B4aiHbplY0m1keUKr4zA4P+be7T3cRYggppydXqCrARvb14lmCr3zp6zuNohSG7tecHw8QtM0TDtEU00k6xZB0qMQXQTVQxFrFDksvSFV2yKNJaZugKguGA6HFLFA3UwQZYGFl7O5scfgIqewNGqNNtNBSsWEJM1RhDXizOewd8y6sEJ3bYXe8IJcNUAryOPSoxadX6DpFeI8KmU7WaHfC6m3gEKj0bCZJhGmbaCoCjXbZnZ4yG6zy+p2l289+pS6UX5YuolLt9EiCz0iY06j0+DV58ckhUijYVC3TabzERW7gh8tsXQVPwoRIwlFE8hIGc+XOFaF09MZhazSsCokkc2oP6DZbKLIFr2Lc3RV56w3xrE0JvGAqmWji5cAvNEk9jPESGZwNkdv1MkjhVl/SqFEOFWFmrzBoOdjN8eMZ2McTWbiiYyX5d2YKJpk4QyrVkXIUjxvTCZGTCOFwfyYiqWxvbOBKig8ODxgf3eD5esLQGI4iZCFBbWazGLhU2tUmZwNMVSHLI/p9/tIUkF/3EPXa/gTj+mRwMaag1jkiJrB8+MjdLugudZlPJ6QZzKirFGt6CwGLqJcYZEvqFfWMDQFKVe4s3UTy5S4GPZRqgaT6Zxe0qc3HLHabeOJEZYeM/lkynp3lUdPH1OvNdGdNqP5jGgYUK020TWbj7/7PUxVoV6r4rke9YbA3/k7v4T8+BxBLMizDEEUyyCL6VyCnVKOlmWFxWx+WcQN0+n0Sk4uCqH0JQoFaQqGYSFJwlX6OYoifK8cEhCkctqPokBRJKIooFarlP61MKJZq18BNEUpJxgXiwUUEKcpmmqUIZxLkJTn+RXge5PkfgMUBaFcfSkBXplofsOOluckX/2sqmqkvn+1VPNGvjcN+wocG7p1eb4lGFZUCUVTr2p7giBAkERMzSJLC0TxMowjq5imxWw2w7Ksq8L1N4xrKa2XKzfzpXvFnP7ZhDeUBchZkUMhgFAyp7qqlZ7DosA0TVzXQ9O4Kkwv0uwSeGmIinz5/JRAWdM0JFkgigPiJCxT8EHZAScJIrZtkudlmt0wDFzXRRRFDEOj2+2iaRpzd8BsVnZ7+n7AcuHx+tUZiqKh6yqapmBZFpJo0Gg4TCYTdF0mL2IGgx7HhzKSUCBJIgIplq3hOCY3b+1gOwaWZZHnKbPZhLPTIa9ennB60iNNU1RNBjJ29zZxKib719vY9i62XWHQn6CqGoNhj+FgwkVviO9lvHxxgmHoiHLMcHLGUijZVTErynUmWS7fl3GOIGcEwRRF5srK0V3dIswSRlMf3y0BsqorqLrAk+eH/Na//COWrs/1GzvcuHETW5LoDc65ceMGab6k3jQ4HswQCxtREMjyAEUWEQURSVSJ4oAsT1AllTQWECWJ2Szmiy9eUOQx58cajt5AJCNOM2RZYDGfs7Rj5jMXQUrxfdi7uUtzXeL0lYuqSST5iKcvDtjZv8Vp/4S19hq9swsWiyXpSs5yLvLZF09Q9Yhme49re3XibEJLW6GIy97iQgjY273FaOxzeh5iaQ1ERJ49e053rYOiKHztaz/Fx1/8FptrLcLApT9dUG926I9cSG0azZip1+PoiyGLWcb6ZgeSVTS9vMkaDYdkeYiiKliGysMvj8lyn35/wcnpaxpnz5hMJshKlUZtnT/+t88pxIDXB49oVW+ws1/l1cEJm933aHctHr14cjnlGrJcBLjuBWLRodqUOTg6wrZWsKwpg8EFViVnvlgwGve5vvs2N/ZvMp3PkKUUd5LxwUfvcXp2TBRnnJ6OgAKrliFrAoKg8+LJhJ/56V/AD6YUrCKmdYZ9H91WaK1UWJycQSZwcnLCee8YKb9Lpanghkc0mjVqtTXa7QqDGQz6Q06OA67f2OJ3/+Vr3n3/Fg+/eElGQBpr/NRPNtGNX6Ky+dv81//s/8je2q/wjZ/+Rf74u3+IGjs0VjRmswK/N8KuhZBrmEaVV68Osc1d3vvKJuc9l5rd5OTsOQUqneY+ghBg6haaFvPdb/8plbrN+so6n3/5kLW9On/y3W+zs36TNItxx6e063dIY51Gs0Je+Ig0qdRsBiOd0PeIYg9YUHFWyBIdUZARlSVhHGHaGdP5GYPhGXVzg81Ng9g3UDSJv+jXX5ih/A/+V3+jIPFI9Rm+N0Wjil6tcfyqT6Ui8+5X3uYPfvdj7JpGFOY02ibHxxMypuhmldj1aTUqKLJMHLqsbdoMhi52TUYSNRx
T4stXp1zbbyAuWsh6QhRI1CoGx68CNler1Lod0iLEm86ZuSH98Wta2jpZZmI055weTxn0I6r1GpqdYNnVkqJ3XYK0z/pui5qzReglTIdjbNvioj/Hi4Z0ujV0TSP1K6y2HSpVm+PTJ9RbFnFSRZZUXj17yr27H/D8xUP8KCNLDHQd4tRHkzOWqUBveoFe0VEFhf2NPRb9BUIeESYi1bqC76c0uim9XkC9ukqW+qRpiCGLZEbG3MtY29jBny243tnn80ffZyyOWTG2mPQzFMckVVJmkx7drTX8aUZ0MUBqSKhClSQtsCsW7myCqon4aUSeKYSjELWWYygO6TKl0m4ycxdEgYsoSdStDpLhM5mMoVDorjTJstI3lcQhZqVNEESlrBj6NCpVqhWT494hQZYTLkVsPaJq1wl9C0XJcEyB05Mxdl1nFiyo1xzyVCfI5tRqDQzJYjwZUW02yBiTBRbjgUe9qrG20eTkbMhKe4vpfIIXjLhxb4+zkxmJHxEHGYqmUm/mzEYe/dmEW1s3EFOL4eicSrVLq65jWDlT1+fFUY97O9fY3dzns1cPSaQQS9TQZY1lMGE2W9Bo1IgzHyWXuLn1PtNozuH5l1xbu0OvN8DRm+QpLNwphmkSZymmJRMlM0TahJHP9s4qhlUwny/Y27pH73xIkgZoWplyHowHWLpBnmWcvD7kzrUbrLTajE7n3Hpnk29//zs0LzL+o0dzFE0mzzN+7RvXOG7Xy7vVMP2Rt0+RSNNyoaMQy73pN8BOFN9MCKZXHZKu6wJcBWyumLUkKVkZStbNtm00Wbmq7JEkicViQVEUeF65zpNlGYIsXXkvl5dy+htQqSjKJfNUyrOSXKah0yS/+j8F2VUQppSLS9YwiiJMw74slC6IogRZFv87KeyyePzNik3JZIaRf8UivildV1UVSZAvOyPLbs43u96CUILvQoDQDzBN88rjmSTJVZ+sIIn/HYbyjaT/JlgkyzKhX84CvpmqfGMbkCSpXOMRfsQuI4m4rlsuaDnWZbdoWeX0BoDGcYwmK9iOiVBAXCRXqfs3K0aqqiJeppLTNEfTlMtjyIjDS1/ppQc0SSIURSPPRIpcJI5jKpUKcRIhyyKiHFHkAq1GA8cqfZuTcdljVxQlK9xqdqg3qqxvtJHklDBy2d7eJkky+r0FJycn9PuDElAJAk7FwnFsVlc7bO926HZXEKWyk3M6WXB+NsRdRCxcn0EkEUcCilKC+DgsGcUil9BtOH/xCG8yI03g/Q8/4P6t/dLSoFsoAmRBgijIWA782z/9I2qNLisrK8xmExyjhiCE2JUKF70BlYrM6+MeT19OysWnIiOTBMhFVLnAD+ZUnCbTiU8Uuth2RuQJNBtdVro2UrHk/GDGe1LOfxo0SNKMPMv4f75v83nuc3o2pN3tYtY9xpMloW/Qbrb58Md2+PyzR6xtbqFUYx49fMbbd95GzCR0Q+To+DWWcpdcOGU6K1dd1ro7pfQv5GiSxGByysXwmPv379O0rjGYviCXTIo8ZDieUK236fcSCmJWV2tMR2MUJUKRmkgSNJoOwVIjTqcgJAiCSLPZwJ2PmUwj9ra+SpCclmX54TFumNFs3UAVlhjKJitdm+9/8jvlso2fYegOzVaVi3GfdrVCs17l8PiUGzd3ODt+Ts26x1vvXOf04AjDEjl49Rpd7ZLLGSgmWiXG8yMajsB8FJOnEGcztrb2iMKcPPMRyTk6HfHLv/jLTIYzUgIWrsdi2SeJodNpEQUhih6hqTbLRUGjskK1pvLo8QO2Nvf47g+/R3dDQ9dXmM9ionDA5uYm88UpYq5gOQ06nVWWY42NHYXe+YSlPyJPbL7ywX0++fS7rK2tsbG1x5MnR1RrEqNBj73NFf7aX/nbmFaF+eKI3/mt/4LPHp+TKBX0wkTTFVZ31sl9AS/sE/oS2zsdLgZDxKLGxo7GfC6QhgX717b48ssHjAYR12+uYNsKYZCSpWClEv2Lx5jqBo31DicX5zQqLS6mT3FqbY6OL6jXtti/afPgwQMm05TOmoC/KDtr43SOUIAsaBSpwkpnnSCec3R6QK3apSjGLN0EVW2WVjmtQ3elxf/s7/6XfyGG8i8MKP/7/+Hdwh9lLPMMTfNYqe+QiDAdLlnMlxiVgmZ9E3dSYXXdRtccDg+GWNWQk/MD/OGU9ppddsbpC/Z3uvSOcw5OXvL2V7awzJgffNLj+q0VitRl1M9xTANShSgQuXf9Osu04GJwzuZqmycvn+LFHsVyDRGXvRuN0pyf6GiGxPNX58wXGXY1pXcxoNGysKw2h69G7O9tMp2M8IIBttUAQWJ1q8mw36dIbaqOjKFpLBdzqvUaiCrjwbCs55BlposFfigixDGFP2dz/zYPnz4ok512Rq2ySRxG2CZ4s5z5dM76+iqVOqxvbjIcHyFKBs+fjNi7qREGBYvpgkySqVh1ZCllrbvN6asZZxcH1HZMhhcxa+YKp6+HZMaC5lodb1kWkLrzJYIeMHotsL13E5Q508Epm1sbLOIQXTdYjpfYVZWTgxm7G1vMgykT16NetVlMPAI/5dqNtVI280VAxF9OUVQB09SR84xMhNL2n9Ot79Dv91GclEUYIysFRAWOVeH8bEi9XqVZlymKjFwyWE7HIEjEqUSjaSFLGpHnAzp2RWQxjWh2HOI0ZzQNqdebkOXoqsJ0csHNW7v84MEPMLQ27969jlgofP+HTzDNEF0UGS5T9tY7uBOf1bUqimJhGyrj6ZTT3hR0hZotYluB8UOxAAEAAElEQVQr9GYBSAvW7DqVokaW+jw4PsCp1Wk3qrjDBbKtsba2xsPPn1FrWdiaQxylLOcuy8ArPZRi2QOZpQGt1TVUVWY4mCKKKlmWYDuXHj43IY5TDEdhPLlAEArEXIHMRC0qkIj48x43762RiRm7rsbf+jcHKJpMUeT8396tcrG+iarYpEkZ2jBN/VJyLaf2ROUyuCKWIOZN0EOUyvDIdDK/9MplV0zcGzApSRKSCIZRVvXIQgk0giDAMswyLEPJAi5mc2S1LOXNBcguQV+Wlx/kbwCUZVk/AjuiSKVqXx1TOU3ol9JtXAK34vJ3akpZyJ5lGWEUYVnWZbAkvXq85XJ5GXQpt6TfBHHeMKJxEl6Bvzd+xTiM0HX9z+2Sw4+qf3JKplOR5CtZ/w0oLVPkEmEc/TmWUlGUK1YYKIHjpc9UltU/B6rfLPBIlK8TknwVohLFcjbOsiwqtoZt21dg2LIsotC/2iKPUulKIn/TjyjLMqG3xDAMfK9kgz3fLe0RCEBOGC1J04SK0ybLErI8Is8zVE2+eg8EQUASlCxypVIjS0NarTrrGx1MS+XZsyfcvPEO/X6PIAg4Ox3iLVNkyUBVVZrNOitdC0kWWF9fpdGsMRyMieOEi96IyWTO0WHv8qZEolp12NvfYnW1zWq3hSiL/No//Q6ioJUdnVlOXoRIqkyaCIiKz9mTl0gFuJ7P/bfusLu1iigKBElCEecUSoaQiShS6eWt1+uIUoYiw3w+LxP8afl67Wyu8u3v/JDPHryi0ajj+1MKya
bIEyRkZEkH0cOupnhewKgXYRY6rZUK/8F/+DeIwmOihUXt6JBf/cP+pZc3439rz3mi5YzHCe12m5WNHIQAcge7bpCmOXYV0qRGJJygGiAXbebTEc1GhSwVqdWqV6MCw9GovA7rKxiWRbh0ieKCiXuKIui899ZX8OILHjw55M7dDxmMXAQlZjw/RVEyJucRTafLzsY6k9EJWWbx3le3efjwKbVGg+HFgHplhSxPOTp+wcpahX5fQXfGNGpNznsXLEMNw6lii6AZFQQUposegpwjFCaiKoAQMJv7JO6SH/vwbWZuimnaZP4EIdO4dnuX0eCUJKpw8/Y6aSbihQK5OuJieMLrwxnXNjYZjk6pmutle4QYIIgp4aTC+x/s8K9+7x+xt/l1PvjKHb748lOsShtBnnJ4eEyrsU21qnB+8QzHqdJq7PDyyRmtZoUgCqnUK+iGwKdffMz29g5hqGA7OnGQousJ56dL7Bp0V9vkGVQdiziMCIKA1bUqvZOUamOdlCeIyTrXbqxydHBCs7HG559/wtlRyt/7n/w73L55h46zxQ+f/Rp/9N1PUVQHXZWZ9XX2d1d49eIcy6yQCReEEUiijm2bGHqd4+Njdrb3qdQkLi4uWC4y7r3b5tGD1yznMoYg8uWXv8d0lPHehz/L7vUNZDXkxauH7Oy+xfHRa9Y3V5nPPNrrGifnr/H8BQgWlqVh6RYkDcbDCzbWW8iSxdPnj2iuOsymc9ZW2xwfDbGqKnZlhSBcErkF/+n/7jf+cgHlL/2tDwshXjKJLlCkCkUsYDZzHFtjOD5FE3dYLmcook61WiWJRExLIQhVwuScIHSJQxex0FldK8gTEVEUaLSr9M8D7t/pEgQB5+MebjJnMZD5xjd2eP0s5uaNu4wGAYp9xHxSUKutkqURzdo+L18dI8sziBaIQh1J7mDW4eDoFbrpkBMQeiqDxZQo9LCNCkkaoqoVoniJZVgoKggSWNYKVWeNh09+j72tTRaTFASVHJEodalUNPqzBSI29U6def+cGlUWRcRgOKG7o5PGCudnQ27fuInvT7AqEpOJiFORaTQkVK0g9CVG4xirEuKFc5zKCiYmg4VLtWYzmr6m2domXFSQopgsCjjwfKqKz37rPlE84+DikFRUQPWYeBbNikzil963otA4PzzB0gWa3SpBkeHNlwgpaHqTNE3JYx8/K0gKASmKaLW3QQzKlRNPgkJDUXNyfIRcZr29TpjM6V0M6K52MCQbCgWnIROEMb25h6WoaFLO2eCcjfVr9HsHiEaEl2TUFIPGapXxxMMyDNI4Io1SKk4dRYMk00hin9l4xs7WNpJcIIgqx2fnODUNSVMZjS+4festxqMzHMfh/NSl3awyny4o5JymqdCt7jKZTFhZWWM0PGc6nfNLv/I3+PLj76PYChfTIdNFQBDFGKZKq1Zl0p+yCCLqHZOm06J/VqZFTSNhfBGTizartTaOYzIaXyCKIidnp6xvrTOfzKnXmgThnMFgxM7eNRaLBXESEoZ+WVlVCGiywnQ6ZjKfXTJEMvVKDfICIc+J/IIwnlNtmGyNA/7jZ2UwJAxD/v7X1vlCTdBUi3plBcOwiOOQKIlLW4dUgsgShGRXrCBAHIdXFUBvAFiZgC4nEEWhBEy6plz5F9uNJkmSoOv6pQdQK8M4okgUlRfYNE3JKJBVhSzPiaMISVQIfJ/5fE6r1UK5BJCGYZQeR1mmXq9flXzPJtNSlhYK4ji/Yh7fMISSJJEXBcvlspxHvPRUvjnXKIou/Zgi8CNwnOXJ5cpLdMVyCpep7SAIKIoCy7Lwff/qZ6IkRhb/TI3R5ZziG/DzJuCE+KPrap6XE4iGYSCKYrm4I0pEUYKmaQSXIaXSixqU/k5BQNUN0uJHizwAaVoCqEqlclkmX8r6lmVRcZyrHfLZvDyetMgvJXUXWfjR8UhSCZSLojyPXq+PbRrICti2xXy+xPdCgiBBuPTFWrZOpapjWhqGqtBodJiNF2VTRpTQOx8ShuX7b2OrSqNZ4/79O6haCXgUReLly0MuekNOjvu4C58wTFFknVrdodWusLG5wvpGB0kuLQJhmLJ0Aw4PThkMxkzGCxAkUqtDlhUEgYdhWOVmNyCgURRTjh8/RcpF/DThzv27dBsN0jQFWUVBJxBnVByLPC57Qbm8obJ0C1NVWC6XOI0K88UEU9F48OVjnjw/xiwRHoKikic5klxe+6IwY7GM6KxUsayE41culuXwK7/yDU4OHlJT1vn5lRbf/I1PEGUB07T55z9zg+/nR2zt3WQwGvLxdz9FlgLee+cDDg4fU61uoOo2k/k5ui4Shw7d9VVG8yP84ARdadGod4miAEk0cH0Xz58hiAVrazscvOzRXWswX0TcurnD84dPaa2aeEGF2+9s8J1vf0LFaSBIHsPBgmrFZnujTRQFTCcuga/TXrNB1MiyhIplUSQxk3GPwPXYvbHJxx8/p9FsAyGrnW2GiwGxdIGd3cAPE3THx3VzJA2i/JzQ0+msNAnDGFWQWS5cavYGu9eaHLx8xfbmFnFUICszKDoIUs7qyjaL8JjBwGXsPsXSdqlYbXQ94eTQZW/fJovaKLrL8atz3rn3Nab+CSevJ7zz3nWm/z/W/ixIsgU/78N+Z99zX2rvrt5u9+2+69w7mA0zBAiQEEiaZIgUadNWkGGHHdaDHJbDL3L4TX6zQ46ww/ISMh1+0GKFKdEiJYgECICYwWBm7r71Wl1de+WeZ9/P8cOpriHf5gH51FFV2ZmVJ+vkd/7/7/t960skVcINJ8hKjaEO8f0D8rKkyEzevPcOcXqCu6zp927y/PAbbt24g6RKXM5OqWqFR+/c5Oz4CF0dcDl5jiCUlInBeNxQTjo9mSxLUKURg5HBeu0xHg85OUjZ3BOZnKX0xxZxNeWrp085enLCX/qtv8wPf+s+d269y9Kb8Id/8gmTy3NW5z7vPfohab7mYnrCYNBBpI0XnLG10yNPNdbrNbNJxDvvPCQrfC7PXYL4FN/3iQKRm3sPGHQrXr08xG5v8c2Tx+zv9dnYHFJWOoIksrlt88WnU1SrQrNygsAHNSSJS0adb1EWAfPJkjfu3uWzz3+K0aoRlZgaGTdYQtJBEjPeff+H+G5O5Lr8r/6df/jnKyj/8r95u86DJaK2jSjobG1VuHGIpuWYpokgbnB89gXdroypbPLi6Zz+qElbrpcpgq0TLyMsuabTTjBsC91oUUuXpIFNSzaRyponZ6/Yun8XEgNVPyJwbSxL4oPfuMvjn/hs7pmkUkYdpYxbDzmduGTVc4RaZDr18YMEZAG73WE2XSLLjf9rNvPp9kTy+ApWK1x13OKgKgVRFPPg4VtkmcrJ+ed0jE2KTGCyeIGi6QimQOJV2B2TLChZBR77u1sYpcHL1QSx8uj3hhwdX9BpN9w6WZaRtZIg8nD6FpZhsZqvEGSXwaDHdGKyfVtEU4ds97t89vQllxdn3Lk5YO2L+GuNlqTRM9rMiwUzr6BKQzZ6NmfrKV6YIZYymiOy8jw2xx28WUjH3uDw6Ji+biNLAomWoEkFomIQe
T59Z0hS10zXLnUqMhprdOy7+P45rndJHFVsDG7R7euE0ZLVIqHrjFDUFEPpsvImaKbA5dRFERU6rS6CnLO/tcezJ4fIVoph6XjrHD9zCasSQ7Bpj2rSQqFMKjQtp2VuY9owPfPI85zRWOOtNx/y5WdPSdME227Tao1w3RWX/jkbvQGKnHOxnKBpJnWS8cPv/Tad9j7/73/0H/LBW3e4t/ktAj/DXfmkoc94tMXDd97ln//zP+R0OUfRCjYdm4Onp9S2SX/DRq5zVlFJtl4jKzo7t7dZTs5oGZ0rLp5Gnte4/pICAd3qECUxslzQdxy4qkA0LB1RlFm5Lo7TRtVETs+eYzkiWj2i2+3i+gFBlGEaFmEUoEg5nr9CMxVEwcJdunzP7PDvfZGRpjkg8H/9/oCPK5dBf4ted5O6hsVijqqqdLoD4ijHNltXXdzitchZr5cNokiWrydyjUjNrgRXfj3po6qvJ4iWblyHdYBrcVVV1XXoxo9CnHYL/WolnL7GEknqlUAq0K7CI5ZlkeXNc6rKBjukyDJVVRBd+SRlWb1eC78GmhdFQXAVwnnta3wdsnkdPnotokVRbEQZoOnK9c+8/jlDa56Lruvouk6SJMRxfO2dDKKQPM2uPZmvfZqv7QXN1FH+11beFc3q/jWWqS6rq9ewafSxbbtpH4qiay+pv1yi6gaVAFXZXMgqstys7AURWdOJ/OBfg6b/q6ikoqiuLzSqqhGVjS9UvAoqJddr+LK8avIRRMoqZ3NzjNPS2djYIIoCViuXwE+YTOYEfkYcZVgmtJ0W3V6Ljc0et27vIooi8/kSdx0wm7qEYcR85hFHGbbdotdr0+vbdHst9m6M0fTGEhEEASfH55ydzlkvE1bLhivqtCxG4y5b2wP6gxatlsVgMOQXH33CH/ziFbbVarridYOyisnSAlnUKesVp199iSprrCOPv/l3/jrDdpvJ5IIoKyEHXW3je5fomkZZVaR1imF2UCWdNAmwDRuzbRAmAZZq8fVXz3nx6ghJNpAEqIUMWRApixpZ1giTgKJMiKKIltNjc6/i/KXI/+zv/z2Onv5TutKU7ykOv/mHTW97jcQ/+1GfgzZoPYta7uHHKaBzejKnqFZUSsj2+Ies/VeIVUWeiWSlijUoyfJLDGWH84sTXNel3VfZGLyNKAu40TNct8Q0JAxtgOv7dFsak6OYR+/c5HR6ydbGbex2wKtnS3Z2N1hPJfZu9jl49YI48bBNBzeeISgiWdpcVCXrEkUsGQ9sygwqfU0tyZSFTJoskRSDgpq8lMnDBYP+PsPtLk+fP0OUO3THElnhcvwyYnM8RLMVFBSODw7Z37/LzL1AV0zaeg+xFknKKba9iaRltJ0WcViRRBLtfo6ubbJYHGFqHabTKS1nhKFJPLj9Fkl2Sl50kWUP29KZLo45nx3Q791BVhOWswC5VlE1ixu3R8wvfSQpZnYh8hu/9V1OJy+YnE7QtD6qlaEZMmeXFxwcPOfO9m/x4O0ejz//lPHoBp9/8Qvu379Ht2+SxSYFq+ZvsoiYzEPuPHjI4ekxrVaL/VtdLl75qHLJrVv3ePrl1yRpHz9/xv2d9zB7OR999gWddkaVb7C51We9bM4jqmLQaltIasjRQcDGtsqLw0/Y6L9HHIqgXFCWNfPlEacnl9x68y0e7j9icjblcnaMqiqMujY7W0OywkBUS376Jwds7vUoxDWSAvPZmuFgB6uVM1nOUUSNxNP58IN3efbsGVG8ojPQuDyX0bsF7uSSvjNmuVyiSDKSEPO/+9/+6Z9vKEcVNOzxEM+PCeM1L88C9vZ3aXU1Tl8FbI4DdsbbJNGcw+Mn3Lz5ASenp+imgG13qOqQ1rbAbJKiFT3WhzG6mnLj7g381ANLZrQzxtjPMHQRMb8JQg9PrOl1O0xevkRUHURVI5g3ANJJeMjNu2NOjhXytIvdLVmn5xSpRu0pjDZaPHtyzI0b+8RZTi2ZRGWMLJZUeYW3DhkMZbygwk9TLhYzhCrGkBTiYEWUpQx3hnihRxlDp7VJuLpEH5houUKwXHMcXtDq9UgqgyCt0R0Lq6VhKQpHR0s64x5pKGGKGadpjFLKfPvdHxGtF/TuZqyFmOAy4I+eHtDq2PRHbVrmkIOzCU5HZFNvs65SvEmIpfZxo4jL5TGmvEGSLwiTANsYIhUGh08nvLm9T+jVDLoFq1mMYJYomYwgKuhO88Gd1TlVUdA1dNzUx3GGXE5PaXcM4khFFVXKKiLxa8K05M7WDc7dCetzH9kJyIoEqTTpSBpunvFqfcJ7e3dIJ0uKOCUTmulLWcU4qkNaBGiWSuzmFNUaUVHJ0y6HF094+8E+ZSiwuacS+yUrP+DNh2/x6ugAUexRlY0X9MO7+xwdHeFFAo61QV3DKp/z868+YWtzSnc8JI8SvvjmcypF4cZ2C0lo0p5ff/UZvbFEEJrERUyuS/RuG4SzgsRdMty6ibeesAyX3Nn/Fv2OROFvYg8MijJk1Bpw+uoQZ2AzP5oSuiVLxWfvxm0MXeXo/Dmj4V1KVqRRQRmp1KqEF86QKNHZpKw0jk/PyNICp60iywJZEmAN2my1b5DGl9y+tY+uDol+8jFFkSMKAqIkk0ULhjsjJFHi6ZMvsFpN8MOxRqRxQhJHaKpMTYlQS8i1ROS6GLqEKNckSYwXrmm1TMqkQNdVQt9HU1uIYkUQpshyIyTFuiLLIzTNIoo9EEpUuWnFapLeIkUOltlGrGXSMEeSZAzdupoANlNDWZDIsgJNa6ZzNSXuak1dlBiaTppG10EZUWiEXxg2AZTX6WZV09jp9lksFrTsFooi02t3MA2t4fKpMuv1+iqV3roWvnmekpVNVoYakiLHzxovZZ7nBEFwLRSDKLwWYLquYxlOIyrzBthe0rTMWEpzqny9Cn/9b1EUCf1mfS9rMrLUVDhalkmcJM3a3jFxHKcJ01zhmsq8uOZuuq6PLKloLQvXc5tUu9l4MlvdDp7nNcicLEMVKzRFpsgEirymiNOGDyoKV4n6RvgLdbNKF2oaBBUiF5dznj7xabVOaDsGlm2yu73J/o1t+v0+IDKfLnn69Clnp1OePD7kn//ez2m1WoxGI/rDHttbG3S7bTpdh/V6SV0LLOZrTk8uOT8744//4CMUTaY/6KDrMju7m3z47beRJBHLNnC9kNNX56QxPH1ygO9GyGKF3tKp1AGm2aGsJGRZQ65UiiqjFAtURSLxQqpSohTBkE0uX00IWx62bbEx0MiyhJoERXeoa4EkLtBlkzTNCeM1kiThhzHeVUtU3ZYRJBDqHO3q/UCtk9UFmixT1SG66FAK4PSagJU76bJ1K2PtHyOELrK+5GJRE0QqVd1cvPzBHx/wpRjz6Fu7tHoySZ1y826XepYyHuxwebFgMn8OQkngwu6NEVHk0bVtzmY2olWysWVh2iWBJ7BYHfPwrTukxx1qZ81otIHvwnhocWv/Li3noPFT3vl1lu4rUk/F7HZYeSve//Xf5vOf/wlB7DPsDFhHEZKoE60jLE3Eddfc3HqDMPKZTjLqouTNdx/gx+eswgBT
99xWTq0MYu6+2Kk9Mpiqyz322xDJPIT3n15hXL1YqmLYlCH7URaUsVa2gwGs2o6oRKzFluKkRxw8idsl5vmU1PeVqsGM9FFMnFchvisCIPY4zeAEnq0EWdtBbppJKchLHropGy20p0msrZZzt2jxWp94auWyG0EgUyD08rXl68YBd7bHYr+rKKM9DIMombmycujk85OTrlYfHAarVlNpmS5AE4Bbo+RSkyhKrCGUzwE580zRkYIvuwJqXCmchsHgVsQWE2VVltS2x5zOPdR84vbGTLpjeaUqURVS5iGzqLPERQoaoaYi/h8uKc7e6JXq+HLFg0RUlZpvT6Fputj6qJtGWJqvXolAJ7KNG0CuQqFjmWdIJg7vDWW/47f/bf5/zygq++/j13q7c47pCbO48g9XCsSxBzovCO+fQ1ktAQJUtUaYSqV2iqy+JpR9wVjPsWAhn71RbXntEbGtSlhm3bxLmHqauYuk2vryFKUJcKfrBFbGW23p7JdICuq0ync46Pj3l6XAISNw+/o6sOJpuHh+gPd+LLly8xdZWnhwX9ocJwpDDq/4DXz17xt7/7lih44uWrSzZ+RuLtubp7i2aZHE9f4W09JEEmincoukAnp2jyOftwhWEqhEHGdHxBU5UoWk0cx3StjGUP6TqBqmqoyoZRf0gcpwx6/YPKUQJds2nbjjCJMU3z4NrOa2yrD111CICqfghBuoooQhyE6IZK00kHDJNt/yHwid+34SVJomkOpp6maTAMgzSND+Dz74tAdXe4a2VZRRBFqrL8fqQOingoVwmCQPd9ezzPUxRRIssS0iymkxMWqw9IskaRgyCWVGWGZY6xtD6y0mJYDWWZI0kaWQKK0bLer0ECWenoOSeodYPmqnz37Qf+/Kd/xj4Lubt94uz1BVefboiihLEzJItCFEVin/hIhsrcPqaqM9IsQOxkVMUmDCNcV6VuMqTWRFEFmjojTSrOT4+QGPHp0zXz+QRBKsmqCNd1KZoadI3jZyMePlyj5VCrOmUVUeYKTt8kzTyKEgbjEbtNSLHPODs7ZbNa8Cc//1MeV0uKuqLtStzeYee/LCq6VsF1BiRZiuvYtGVC19ZUTc1kcEnWLLj37jG1Y2zJRpYCyko4rJjYEvt1jGQkSIJDkjS8eHFC4C1x+kd0osfuLmE8Pebh/leMj85xrB6i3CIaKh8//JIkGnF+fE6U7hFEFVNJiKKG0bBPmlT0HZfP3pxzfX2FzJTpqcV2u+L+7i293gWD/pSqUlDMPaFfMJwMeXzYI0ggqzmBv2U8vMTzPNyBThQm9Poz3n37AXt+WOFwdYU4yGglDUMfIpYVvb5OkYeUZUqeapycnICYsN3432PVmgOWThBQ9YIgSKBxmR8N2O6WxGXNwJpTtTlZ1tB3R6TFnlqpGbgKlqiRBg5vPv+Mttuw3URkicizF3OizOf9uwV/589+wGJxz/IpZXY0oapjotBHE0fEVYSqQVkc9oZ122VgNaz9kDZXMVUN29GYTk6I0xBN0RGViPOj17z78CuGkzG7XYKsqZjGgOtPv6VnvsRyGuI4pk40mjzh5AcyJ8cq/+U/uELuGShCRlyauDbc3T8y6M8Z2DZ1faCuOI7FZ89+QLr3+XATMD0v2TwsMNUZx89N/nf/+T/4d+vyDpOSutY4P/kRo7H7PRdN5ub2Hcsnj/V6i0jDaKpguzFZHiJLGlkeYRojnKHK2cWAn/zJT8lih1dffEbVKnz7NqIVLaLQ5R/+w3/OX//Nv2LlL/j1V98QphInF+ccXer09AmXl0do0hDHbvnz/+AzZsM37LYhotyRxRqKIlFUe2zbxTLGSKhIgshu7dHUEAQeT0+f2G9XmKbL5nHHbrVFERskpWXYP0cWHRAOQXK1WhOGIYPBgLrKaaoWz9/RdSKt0FLUDft4g2xCqe9QhoCuEFcRimGy90KCMGc8t9GkAZqqc3ZpY9s2R7NXqFpHr29SpgKBv0USFYRG5Oh4gmHMsF0HZ9in6mQMU6OuOvI8J01jyuLQNvU8jyxtqJqQppCwzCHjkY0iGxSlD0JBmR8QGnleYtsug8EYWRSR6Xj31VdUaczzy5dYuk7T5iwWjzj9HnXb0nQ1vb5DI6VgHF4astUaS2xYZgtivWYTZGzDkIaK0/MZeZ7z/v17ojQGRcFwJyxWKUVVIMgRRZlQNw1+nJDVIqJuIGkqKFB1NUmRoxiH8X8Y7XAsF8WEWmgx+zJ1J7HNHxAMi21S0eoqs6Mpstph9lX64xFe7GMPDBo6agLcoUUQeOw2ezabLbY9RTVHxFnO/cOaRhJ4+exzzqcXjMY95s+G+EFAmZXUXYkgSyRZQVrETGcj/GjL/HhAkTds1h6Gdo575LAMajBUkrTB9zqiMqfUcoyBSEHKU7DDcmwuJjrr7yRuflPRlxYcjed4UcBic4/lyDwsP1FXOfP+FFUwSfwcoSuROoEoTNnvtyiqxHw+ochyNNGkyFvSIiaIUuKwI08lNssNVSjhLzuk1CbfVShig6O2nB47KCqYqkYlbbl84XB0ModWIMtDomqDO4TlfksndmRFg4CEZaooWktcpDw+bWkyAUXqKJuCWuiQRA3H7CNpGrpjIYgd+53IzW1Kg0AS1xSVz2L9yLMXXxBnKf+P/+ofEqUVo+FLVpsUp9+j17fQDFisH7l8/oYkqvn6u68p8oqqLuhamdVqRduVXJw9I88ypsPXWPY5+zhFtQyCxCPJC6q0oy1g0h/i7wKWS5+N5+HFG+LmltX2O3x/h2FY/PY3X/GP/tE/oMhbJLlmPnmO485QdZOzk6M/nCTcEwceqqQfJhXaKWka8+vf/4rp2EBWKx4el0Rexi7YUJQRQQRmX+fmbkUYCHStBE2ftjhitUhQBBNZhMloSFsdXvv2u8P9VdUpae5RVD5xukJRUqrWoyNENzuyYk9dpXjBkrrJcG2N5nuDSFXHVPXBM65oMlmRohsGumEBMr3hCEU2kGWZfr9/QB19r3xUFAXD0Oi65v/HsCMSRRFdJ5CmOfBvRuOCIFBWh5LPv0YCtdXBF/6vMTuyAHkSosodnZCCkCArGddXbwmCkKYuGE9N6rrF1Me4lk3b7QmDFEFS8IKU5cojTQrqJkdEQJcdJGVKGIb4Ucb9/Yb+0OF+/ch6syMs1ggFDHQHuehQEQ+A/jLDNXU0sSMK9wThFlnSmJ3N8MOQ8aRHlhYo8uHHSZkXSLJA37W+3wutubw8R1JqZoMB/X6PfZCQpxFCWvL49oY6aemPx+iyTV2DbgokcUFVyActcNYid0Omx86Br2r2uL9bYlkWTZOy3+8J/PTw4itLB+3lZomlaXR1S57nhEFKkTf40ROdoNLmJkXS0nYF6aEbQxAvgA7FrPH2JXmdEKQeD6s9knrE8u6JMjM5enZC2mYM+s/Z7hKKsuW7jyE31zG29hLLVEjLOzRNJYoEVNUlSLZkeU5TCwiSwdPDnv1+S5Q+cX3zHs/zMcwxpi3x7buvSIuSolKoWo2vv7mnFUC2axabgqozWa53tCisNksM00FTTfI2pkgNqDVWj
wvAxM93JOUGsa0RpJJef44gGTiuxv3dA/ud/z1GT6GsUixziKo4lJVCVYuYjomiSkxnPfb7g/2qrCIUrSGKNoxGM5raxOkf0bQD7IHF/fKW2xsfWZARpIRf/+4rnjZ3DPsKm6cllqbj+WuqOmG3y9mHNV4SoakicRzRdCl+sKcRDne1YQ+4eHZ6kBWUHabRx7Udjk/GFJnAt98+oRsj8sDi+HjE7fUVghAgiR17/wZFtUCRCNpb7JnLp48x/+f/y9/Sn+lslylub8rAUgg8kc++fIasNwz7I1RJ5/F2TZWJfPj2ih//xX+L/8nf/wt+9OUP+MGXf47ZVwm96N8eCv8tf3/0DmVRQ5OWLJ62qEqJ6dQoeoGmjrF7IEl9omRFr2+wWiTIis3d7YqulZhMB4zcGR92b/nbX/7XIEiYK1BFm4Q9bSMiWCaudc4+zTFNgfF0jioauH1QtApBOELVRDTd4OPVO/w4ZeWv2Hg1Vk/F7qnsfZ/R4II0DvA9jyTM6Dl9Pnv1kg8fV9SVRVlWWOaArhKpqoY8qagsAdvtsdw8MJgMqWuR2WyE3rN49+E9mRxTSxpFV1HmCZICI1dD7hSG/TlZtuXu7Y7ZicLDQ4LtKmzie6q6wLZdagT84Jbj41N0vY8fRJh2TbQQGM8hyz1qQWQbPqGLLs2uoe3GiJqF1MoE3hJZbnD0MUGS4Domo+GA1SpEtWWqMsWyLeo2xTQGrHcPGJZEXpnIioHThxoRRdUxOoijArFuDwUV28BxLNI0xh3qqLLKPuxI8xQvDjEMjb2/ZhMtaTuNthUw7T5xNyQOn1CkFlt3EBWFNMvIy4zrb++RTRNFlVmu7nE0Bc0RyJuMy4s3JKHDbh/iDKeYloKX7OmqGkmQCfcRveGArA2I6wRbMenKimiXcn46RaQmTt4zfWYRBht0o0IUbBR5QJqmB0uQaKGaEkXZUuVgyh15XmLIOmEQ4LoOG39L0ZRswiX2sOXJ2/Lq/Ec8PL3DHErc7D4xm3xBm+TcLq948dmPWT4kbFYe/Z6A5ViUdc54PKXqZXz8+JE//fPnfPH6EcOS2MQNjRAzGOh0rYSemXzx5RF19RM2yxuG2kvcV+fMny2odxa6DaPpmOj2idn8lN3eY2o6lGFIbkCwzLGGNfP5MXGQIyoNiqjQNCk//smXfPO79wiijCRIxFGLqVeEUYZlDDGouTj7gqfHDXkrMEBDJCBNC2TTxFShUDukXMCyzlis3rN93DCYTImiiqBIMcwORRwg1AqaLLFcbZnMzwg3AYrcUOQRhtXDDwLGgylpWBAXJdagpctltk9PCJpCEuiMdQMv3FJrPVb7GKnu0xk6b+/e0xsohFmFRUvPnpCmCV88f0ae50haSX8EtmOSZxk95xm7zQ3DmQZZyfHoOcv1NaZjc/TihCLv0NQpuiJjWDrnJ5dsVmtu7q8YHg1Ii4aWDnc4pW9bgEDkS0xHM7xgwXqxpREDJpM5vaGNZSsY8uQPd+LD7RWzmcHnb77k17//PfswZGY6ZHXM17/8RKdIbPxHTubPiKuCz7/4GZsw4pu3v+Ho3KBvVez3JdPJnJvFey5eTcmihii9p2s6HMvFcqCsRAytj4DCerelaRqqIqetWvK0QpZU9nufumqZzx3+6q/+Ca/ffEm/77Jc3R92GKV/HQgFhoMRkiQQJ+Hh5bM7+LN1/YBh+teg8bY9YHiapsE0D8aXKEr+oFr81yPxOI6/t/polGVJXbeEUYRlOei6Tl1VyKJM2zbkeYqmK2x360ML1rTZre+I4h26rtHrDQlDgSKH/brgaHqJprQE/p4yFRjPhyiyzHT6jDjyD6IFScKxesRJQVKkXB4fsV7ckNU5qmiTbwMUXUCzbBb3jximRn9wCCKqbdIfTSmTHL1qSMuSk6PnVF1BklSYroQoKdRtx/npG+Lohjwz0EWTjho/XGGoPcoSulYmrlJ0qcURTTS3oyorHHmKX+/ZbCouLkckcYsiKUT5mvnREU1TUVfQ61d4XsPQFTGEhrTYs/70gNsb8PLyc1arNZpokoQplt1g6A6aCpv9Hbo64ui0TxDsyQuwHRtLGSBLBqKcAhZJ4aOikqYZZXUINattgiTL1A1sggWmWrBfLvHqFNEaYxkatWCgqiIae7wAzkanaEqf49GMm6ePlKVMWfXR1Dlx1PDi1YS20FguHzD1Y+L0CbFJiUNQZJuTk1PK9iPr/Qfm2jl3i1vqpiB4iCjwKBOJo6MBbd7Rnw+Q8gFlrBJ3S4a9MW1XE60bdCZUUo2uj8k9lZeXAk9bD0VuaGqVRhBw+xplUaPKY2TliTTJiQMPdwyyrDKfnaEoBjefAk5OLS4vvsCQJAQtYrHYMR0+PwTlMmS3s+iKgslkgqnqPHiPpKmEZQ7QDBnTHJFEAWldocmHzslisaAsBExDJc9SkmpMUvhoWsvF8ZdE1R5/n5KHFbWRYRgaVanz+PhI08bc3d/Q6+ug1NSdROCFJA10XcPHtwuEpkd/YLF8CnjaPpI3FaJQ8sMf/ozfff1IUZWMxhLhvqLrUuRGgGzE3fsn+sqGd9+uOP98wsp75Hh8xj/7R/8Vhjzg/umRH/7JawRRZLvK/t0HyjrI6ASVrEu4fdrRH1j0LAMvTLGcPlm7BlXj6naPqonIck6S5ZydH1GVJUEa04qgmxqjsY2mDKmqhvdvn3j1+oiLS3j4piXyFUxboC4Tjs/6+OGC9Y2AoPyWzWbHfPIMx+rhByvcgcVzYY4kd2zvI9JMIdw8Ier1Yb/z9ISmDMkqka7qMBSR0cShbAJ0Y0oSdswnMwYjiVaTORFmKJJMLlWgyQQ3O2xVQ7YV6lIkKQIkKUJCYGTPkaoxRRUQJzVHpyOaTqZnSOhWha04JJVKmu3QpT6DeZ9dElLud0TZnuKhoN+bcvewp+1qNEHC0UbUTUq/P2G1iJBqmThfM5r2iBOf92/vmZ0orNcxuhmR1xqSLiO6IoUqMNNHbNcRjSJiGwKK7JLnKQO3jyRq+OWaipxJf4YpQxTXWIaFpMtMXYMwyhAkDdN0yfKGvC3Ji4QmFFB1F1kt0BqRpDaJsh2WotCpBmmXoVYdjmvS1QJJ4HP5/A1x6FHUGopp07YxXWsidQ2r6gq7tAjCDl1SyaINQieiGiptpRK0KcQqjtRHbDJqEmRZZL14YngyZ372Eq1tiOsFdAO6WsCduKx2n3B0gd0mJhEyqlJhaKo4vR6a0xGuPV5cPiOJUxoVGqlj7ycIXYtZD/kn/+L/id0f0aw8Bn2Trk64urtmMDhiu3jAteeYncR05PK4f6SuJCQ6jF6Po6OGq99/pD+8BEnisx8+o2/bbJYbZEXg0/uGYK/z1//8nxF7Iv+L/+wLvn37gcerHV88O6cTMspySW/SEe582rigU/vskohWklAdA8c6Yb2JkC2ZVhYJ0y2irLMvEoy5RRaFaLaBMsoI/JQXz8d4UUOVFDyFD+huy2fHPVTTYLdJGTgCHSUXLz5nH6worZKqKfACH0EscFURXetj+zHT0ZDFx5CkSDm6sPjy7JLf/O6K3miO
6mr0OWafxYh1j76lI2sZV19t+OzZG7woZDwYUzU+ChJRkVA3LdmmoMnvkLBou5hWTHhaVUgYtG1DFDSk+ZbZ/ILF/T3Hz0YUAYidgt7r2KcrBNfGD7egxszNF3Rqy6ZeYjcvKVKPqEk4085Z7VM2dkDctLS9Mav1jpPxnKCViXcyqq4hk2PrOUleMOrPMLUChAFxHCI2AvtNjK7/m5b37GzK3cMdUfo1YidTRhE3e59g49OmBtasI2klkixmE0QcHw14+HRPv2dTyBkfnjzmrkvBiroruflwT12vcftDBA46v7oWyZMaU655etgwm4/xE4+qbinrmq6WkFWdTx/ec3I65+b9FWNnSBll/LNf/47PPnuBM7CQFJnQX9FJNWm4Y9CfIUo1WZOgahZlq1PGBa4NadpimA5lFRBGAZY2Iq4b2jpG1CSy4lDO6dlD8qyCVkFUZLxwg6VZqJJMv2fRdgJFUVA1NY2okuYhStfRtSp541HjsfBkoqQkjhpCLyfLO159/hm3j+/QTRG7DyIiU+2E8ajHw/2Gu9sF/8nf/Xv847/6J3QCBGnNfr9HqEUETWG5XpFXHYPhiGCfILQCtqLRpRpR5pOIGmVTIlsqdVvQFB2LYIkiG0ztHpATBwkAg56J0BYMbJc8jqlrA8cqiOINjVijiAdTTM9yyOuYcCVw8tLk4SlkfDbl/dtbHHXE+Ngiy1t8v+L0/ILl+gl3oCPIGUKnc3SqEvoNwark/GTMzW2ALMbIZkpTjnH7CpEvYpguopzhhxUyMlkc4PR7NLlPFsgoncv5xTF+/ECY7DF7A+azMY+3TzSJSGO3RGWKKjr4kX/oFigSkb+glhQM08Ltm0SJSlnFxJmHpho8rtaUncjxvMfHd5+Yz4/5tP4WL8gY6kMUKSGOthyfPGO/LciTBT13TCt27KMGvVSJ9im7/RJJlkGuedw9UIsi46nDdpESxzmqYyD3ShRVpypSyjogbFoMPaKNOrZ+iKFIaJaA7op4QUOTtpimwKqUiEIR2xYxTJko3qLoLsu1R1PLOLbGYHTJ4ukOaoOy7Lhf3nH56hTVDAiDBqG2aMQC2zqh7+oYUp8saumZOmmwxVZ7dDkkSYAqqKRpzKg/YWCds/PuCTYetd0S+gFu38TzQvoDlzjKibMcxYkY9vqsNgvK6YYk3KObfRxBY7uLmB3rxPma4cik7x5x93TFahvRs3ss7u6QRY2zo2eUg4q4Eth5W7rIQpRj2hb2eUK+vMeLVqimgBdU5EWEBmh6TteZBNuIy7Mh202A7SoMTYedH/O73/weUSiRNAlJlym/WiE1DW3V/Fsz4b/t748eeYutia4olFlNsGuIPQHbOGWzivjFL/8pNzd3CI3BeDhFkVw0ZciPvvwhbSVTlAmyojEcjHCsY+gM7j6l/OoX3/H5T2Sev5zwi198x95PaToTWZdArlksN6TFFkVPsM0ecRhxd/+OrNiRZRl53jIYW8RpTJAUbB49RhOD6WyErSt4ZU1mNDx8uGMbR1SSToNFHMvkqYLlqkhuRq1WyFpK0auJioxl5rNvn8jdDNs1sI9HaGJA367RRRdbOEcoLfL0iSzJ6LsWs+MjmtphdmFQdjWSkZMWPpLQY7eO8MOA9XZLEEcIgoSmGbQczAqGYRCXGVmRMx72yMKY2chBkErs3oioKLGNIYosEnoCo9GUh9slXSkQ7zqKPCDcBdx9ekTisFxflAJBGOMHO+5un7h/fECoFNJ9zGazJK3kQ6mjKVivtiz3W5q6pMz2CEaJl3l0pYDcaIiKSBa2eMuOutGJ/YDZ3MF0LarcIgpa+r0xUVAS+zkn8zNCLybPGnStT17UdJWA1fX4zTeP5JsaVZORhhJW38RSLChldLmHYpTUeYYoZNRiiSRZFLmOYJqIjg4KGIZBGFdo4hC9sTBkg6ZZsPcT/KIhrDyU0qKvODRNw9M+YLPZoaoqlmUgySAJLXkaY6gGUiPRGwiMphKWVSB2Lk1msFzeYJqH5rVUO0yGHdNTlaUX0Ql90CSi3CNLa4LIx+mNaBuFx4cVi8UTQRDw9BAReiY/+PKcr779BSId416frA7Y+O85vRiiDcf4sY/QNnRRjdBENGJKo0ZIiszm0aP/fYHp2YXF5cWQ7WJFvgexULn65i3edodYGKSbDElwkXWLWMhpaoUyAy9PEDWLvOjQZQmJDIQC5IZ9sqeUWtbBhlpJGJ7qtKpEKdZobsfwyCDLOsyexPzMZTKZYJkOQ2dAm6Ysbt6jGz3asqAon6irlr76jL415tPjLe5YRFQ76lahJqLuWgajAYZTk+YZu+ATdSvhBxGKKhJHKUVWkBV7prMhsigxPT1msQnQxClVobFf7/HDJ3SzwU/2hFlAXRXUzQEqvl2GePs1opqz3GzYRhs6reL9p29puxpV1fH9kCyKCfc78jqgqjtWSx/XOkJUbL79tOPbj090yCiSircNGLmjP5zH6xVlfGgJv/3wiaLr8OKG4fSMZ1+8QnUdGgm8JKI/GfC4XoBSUIstT08hPUcjTRr8VYreOPz481fMZz9EpM9k7JLne4SmxjUtYj9GEkpERaZqOhzH4c3nLzk6HmNYCj//+c+x3cFhb3JgMD8f8fLNBWVT43sxXdNS1ylZ5ONv1zzcXOHtd4Q7j/uba57u3hMGtyzWK5abT0TZE59uPzIcjWmEirTcY/UMREGgzCsM3SLOAqouQjVbtvsNbatR1hKdVNI0ApIg09Q5dZGTRSs0gUNjf3tD2+7JYo+mbKiKBts0sF2Jy+endG3K0XyCaYoYap+2VPGCO/72F3+NVotoyPyjv/or7p7uiJuCUmzQbA1dE5j2xti6hmEIZHGDpttsvT1VYyCqORfHz3FrC6EuacKCLhcRZYMvT35MXzSpc5U0zuk7I0b9KXfXd1R5cdgJFgKOhyf0Budo5jmqZDMem4zP5mSCyPRoiDaWaCWd88s+vp/y7NULnHnHZHZEmSeouo2itsT5lrys+ebdFVnVsfM6bh+XjI8GxFlLUpSIqkGDhd03CJKEfRgR1zsaxUS2euiOgSCWBLstqtwjyUpUTeDpboMs9lAVgSgMuHq/oqktVEtju9rS5DlNXTB0bY6P5uiaQp6kB4GIpfLwsCTJ9pRVRtulCHLC06OHro4J9h3DkUsnhLSNQJandFJFGJZopkyedQhqw+Punk+3j4gIzPszaBR6I4cvPn9OR0metHSlyW7n4+0jVFVnOtcoioI6L0kCH7ff52n1gC7K6IKEt/fp2ybjkY0mWwiVRZknKGpJEiaUiULX1uz324MbXtbIs5rj41MURSPNSoqi5OWrC3q9EXV54M4WRYUiu8RJSNGEKGZ9sHdVLUFwz/nxCel+j1BIdK3I3d0dimJgWSa6ZbDzdwctcQ6qMqUTBAS5wR1Y1HVBHBRoco9hf4IgKGR5jWkO8TyfMldpa4ssj+kak9HQRVJg48UsNyF5KpAEOQ/3G2RDw+z32e0bNvsERAHFcFjtH/h0/568rTEEia7KiYMQuTt8fyf7BEFIydOKWqiwBj32ZcpTsib
XSj4tF+SVjKaOMPtDnPGUTnLZeSp+oiEa1v/ffPjfOFAeHfVQFYXl3RbbAFPXeXq8x+6HvHj+jMvL5zw8HHAeg6FNXgTsgxVZFuN5O1abG6KwYLP7RFUKjOci/73/wU9IQ/jl3/6Ktu6RFiVB9ohhK8znM86eXSDLE16/+HMUueazF58x6k1pcoXBYMTN9RO/++pXqLLLT//yOec/HvHmyxdMZjZFr8Z8HqCOEqKuoj9RELqaPA7oGxa5X1PFCpoyII5bwl2BkB8+sIoh4wVr0tCjkRRWt0u6JkTBoIxFeq5NEq85eeay2cQEUc314zv87J5PNzvysuH6doeq9fD8LUKXUzUHv62u67SNSJwUiOJhyb2qKpyeS5T7hGlK0zXskyWS1FEUBbbRIywzND1nbCl0ZYio6+gjG9XaMZIN+sYIx3IR5YLJtE8YNvj7GsuyQWxoahGxa2jShhaIyxxB6ZiO57g9GxBpKhnTGZPkGWUd0DQltm59HxALRiMDzR6hyOBvF2RVgyinDN0Je28NbYlpaAxdG5kOWRJwrcPou983acWOzy8vmY8HOJd96sLn6ekT05M5s/mAMNgSZQGO2mfkjojyEkGEuoMuBce1yNI9friiUwSMoUMlF/jVkvVmz7PzIxbrHZPJnM/O3hDvQvJKJqk8yjpFEDt830eWVeqypef0KeL8UCigIIlk2kbBMARkRaAoRLarkl7PRZMgC1Oq0kRz+jhjmU7s2Ec1ZbGj7Ryq+tDuE1sNf91yfxWhyCZBsuHT9VsMq8fJ0TF/+R//GQ+PnxArhTfPPuf1D1+QbDVsbU7b6pimjlRLiO1B/3b2ak6R12zXOx6f7snSGFWRGfRdijxG6CqOpyO6okVsBDpB52h6QU3HarEErUHSVLImwx3KNHLEizfPaVoFRXFouhJROqBgtts1eRown0wJvQ339x8Q2wp/HfHs/AhZbLn+cMtmtcW2LORGRGwFsjJBl0XqUiXNM8o0Ophb9AxZMYiSGF1zUQ2DJC3xw4TTi2eoikHTNERphCy6KJLJbD4m+R5sXSQ1utaRNBnIAl3TUCOhigZxlrLZbNCtKUWekNYZfXdOlVS4AxHLMZGwcFydwWmfb2++IesywiRCMx0k9dCeRTpgYkpUgiLnw4crbj+8Y94/4nh0goHBepGjqTYf3l/94ez2G/xwx9dv32HYUwRtxHQ8wun3cIY6ZV2TxDGCaPN0syaLSnpGj/XKx9J6lFlOmhVcPHvN+fkzlt6GVi7x44A4zfCjLVHmo2k9Hp/WqJrD1f135EVM1ypcf3jE92LKtuTd1Vvef3xLWMYIKtwsPtHIhxfIOPNZPN7RVBmO6TAauhwfD6HOSEMf6gJDAUvv8LaHNZL7+98R+Bu8/Y443pKmW8JoQxgk0B5c8CIShqGRFwmGoaCoHZ1YkWYlaXzYodQVGamFIvXZb94TxO/YbW8I1iWmckxXVxhGhiAWTGZTqsY/KBJrgcSrCLYBT483iCgMh0Mi2aNSIwQl4eRkwHjosnh6xJA1bM1BrUCtG5qopc4TmsLn7GjO6XRKsMopa4HjoyFvnv2A48GUkTkm3UK6DTntnzEeOkwGR4z6NrNxn9Ppc4b9E3RdJS8q9smCKNrRc1UauWYTeWx3O8o2J2hLMhq2XsrTckFRqmy8lLgOeP/+gWFvzur2AX+3ZmBZKJ3FrHdKsA2JdxWaYCGIKrt9yvzsBD/ckdcVcZEgqQPOLp9TJRneyscwNASxpCxzFH1AUAossy2rMKaUSlA6HHuEo/UQq5rc32EJJqezc04ml5Rxy3g4RugkEq+iziSyMGcfLlDNDlnSQJDpWpVPHxeo6kFp+bR4gNbBC0Lu7q8RBAHdEim7EssaEWd7wixDs4copk4cegz7AyREJFlH1w+FM011UVUdVdGJo4woTvHCFcPRlJeXlxRxC0JBkbWk0ZbF/ZK6qfjy1edMhwPiKCLwYmRRQpd0yrilTAt0U0fXdTTVQEAhihLyoiArqu/3Kk0UUaYuD4rXXq+HrtkoqoGsqmiyiSio3N88YesDRiONLI54cXF26B1IEj13QBTUJHGDLLqIgkoYH3Zcvdjn7PKC0fA5i4eEyfgYEFBVGboCWalp25o8L+kalaZSkSUNRRUYzWWazMKxjtl4Pm2noGsNeVpgGTqqYrK63+FtPNROY7PYIlZ8v3Y2BqCMS4q8oukURFFFbg2OJ6e0TUUYtWz3HldXVwhVgy1rVGGF9xRR5RWaWRD6NUVe07Q5d4+fWGy2vLt+/HcfKKMow9vtmR8r/OSHP6JvDg4Ih72E2zNJq5TL1xcIksNvf/cO3Rhxc+OxDTbojsFm53F9+4AizqDuAznv392w39SMx2MUw8ceWFy8nKPIKrJkEfoVQjPmt1/9Dav7gjgKDoBQd8r2QcFUjjg7OUczM6YvFdxhj7vrkG3o0+otZZVBI+Ie95mNThlORMbzBqcHqt6SFgvWq4/omkh/5uD2FCSjoUwyTMnhB5cvGdo2ciLy8tUXTMbHDKcGnRSy2nlc34cMjvuoPZk4l9BslSQNSFOfIk+pm5xef0pZGSia/j0Q2AcOmKPxcIIkgmWYZEnK7KSHnzQ0mkLWFZStRN80CJZ7glVMVQwQrI5a6pgMbJo4QW5cGjSSeIfpgqzWFEWGJClMhmdoQp/RYE5WRNRlieUMaZWGIN6ShjmLO58w2mBpCtbQZDg/RmwVxFomiFI8vyRYFphmQ9sUZKWPgkbsaRS1hG2rSHJMWdYUZYrvbQi8EKETMVQFUWoRBQlT0+nNXDpyarXjcbmjSiQMuc/7j9dkZYDtCkiSyX4ZsLjb0hvOSdI9iiihOCoZBZp9MH1kVc1uGyJUErrskOQ1qecxUabE65Cnx7dEaYpsSCS7HAGTvGwRZIHV9pE0C0jCCFUySeOOtnCgUpAFnaKoWG7WmD2bl28uWW23KKZEz7XZbnyaLuDtb7ZMBlOOjy5pS5Nhf0aRN8RRztP9mjwCVx+ThgWbzYa2lFCwKcqGh+WCYF9wPHzN9jHi+tsPbHceg8mUpuvwvQzL7PN055HEJZ0g0FARRhFBkPHh0yOiYrLe71BMFWtoc3d/jaRWzE/HqLqEVHYIVcd0aqMOazTFp2syVqsAL4Lr2y1ZUVMWGa4u0tUNVQGKaBDuG4okQhEUbNVBbAyOen3SsKCMSmbDKWXV0rQtURIiqzZiV2EZM0ajAWnhg9bhDi3qEjabDafHJyAUtKWIrIggSCShgKIbLJ4OZYt+f8jT4z3LxY5ef8zx6RzVMpA1E1nsc/fwQNX4VHJBlCaIqo5t2CRpipwrfPdwS9sVHBl9BByyqiTdr9F0h+V+zWq3RjEUBEEkLUsEzUCSBMIsQK4ldNdlW0S0mowit1xdfYdkdHx8uGYfh2zDLYu9/4eTtTKFILCLU4Ki4n55z9Pa49P9iq8/vOXqbont9FFtG1WX2WwfCPYNqn6waKxXPsO5y7fvvmW5uWEbBixWTwj6hrQsyEqBvOrYhCHWWKRVSkxHJkoiVM0kK1q8IGDv7/DDHbqtcnI2J6
9LEBR23h7EiqKJMB0dx+7RNhKWZYFQMJqZ6I7I55+/odfrYZsWs2EfR5HoGzoj10CoU4QmxtI76iKk3ysxzZgsW+Dvl6wfd2RRx34bUBUlaRKwXaxoW5+2rJAEHV0vKcsARZJRRRPdkPns1XOyoCTc5jS1jOu6RPGa4WCKKhkIrYEpj2mrkmHfpswlppNjYr+giBtmwznD3pSulXj24g1Z04CqUkklWQPuaIZkaqSVgGYODjB+9xTkmCd/x/LpAUPvUaYNI01jdnxKHAWoisR06qCLBm0O5yfHOJZLFNZomsbWL5AVjTBbkbctgjhkNJogGzV+UiC0DVHk4YcRZZNSVhlpmDGaDlA0mcFYoigjkjRkOOqTZhE9tyEMl7RVwvXtDV6wZ7PeI8gdkqSQ5g1hmJNUPrtsy+jIRRIh8vZQSRSxTJ21DO0+JQmSrvL1N+8IozWyUCK1oCsy2+0NeZqxXm5J4hJVUmmrGkXQsU0Hy1JoapGqzlmuV2y3W8IgwzanB7yTAtPx5FDUdOY0Rct07LDb++yDnNXuAbQEUXDIEhClCj8J8KKMk7NLvCBgtdqSJyI9x0UgpueaKIqGrKmomossy6RFjmaYJEmCJElMZhNs28YyLDaLNfe3C9qqxdAlLNXk/e9vmIym2JZMXQlIMjRdTN3kdK2It0+wHBtJrvGDNZtdQttJdEKDaQ9AUgiiAKc/ObjIW43JeIYsSWRZR5zF7P2IKEnIEp8kTNjtHtBk+TDlslqS1ENRFI7nM9aLLVcfnsgSyFIYjedIKtiuSZqoKLKBaXdYVg9FbdGtnH5vTN3k7Pd7JLEl3lV4mz1JJHJ6doSEQrBfUBciZR4jUNAVGV0pYSkDYh/yLKGoKyRTAw2iMiJO9nRtTVlB2TVUZYrQZhiyiNGpGJ2BrThQSURejKHIyLVAk1UMLIs6q2ly7Y8OlH/0DqWmC8yOe0xnfb763W8IvJrBYMBwOGYyOSVpl9wvHgjDlOnpkMfNPUleYpgyO6/g2fmIxG1RJYmi9DBkDaGVePXZCADb0tltQwynpmtE/G2EYojcPdySJykKMs4wJIokDEskDkSiICBJVMYzmW9+fY3e6kT1ljToMFWJKjcIGw9N1Hhc7xjNVdpGoG1LJmcDRA9EqUPTW7ybR/aqgKsNkHsKkxOHQgxx2oZyouFvc6IgoN/vIyBx+eoYy3F5Wt1RpSM0Vyb2QwaOQ10ZiFJG4OdMX4zRnSmfPl3R6/XQZIW6qHl58QKq5nDp6gZW3/4e01Oz3Hp0XYlYhNRGfTXNZQAAzmhJREFUiiKIjIYOXhix8RsG9jlxEOPaLl7gUbR7LHtI2XSUmUzexshqzmxkEwYxZSNw+eyUXt9mn+yJi5po3aHYLeOhSlSCqY/xM4+P1x+hlhFbhcuzOcuHRwyrYtSffK9nS5lNNZplRRwUTNxjlo+3CAK4ro3YHprm/sOWyZGBrKiUBci6iNRU3Gc7/Djly94zcschbyouj48Js5i2zRiPx2RBQM8YEtUR/d4ETRkS5AtKoUJXNXY771D4MGwaRNI8o9+3qLYOf/F3xmx9j5tPGwbDU/abJWOnh+EYUHesNx5BUDGfjwjDkKZpODqeIIsWnZOzXa9QDR1ZNxBUCJsFXpARFhXvvnuPpg2YdFOen6hQVqyelvSNEU8PD4hkTCYjBGnKz3/yc959/ZEyEykymaRtEM2cpC7ZXi8YTQw+bRbITXgARjsdnz49IHUSdDJZ7ZELJa9evCTYBiy9BbZtIak6imbj+QltKyMIBo+PN3RSx+mox+rujsFgxPnRjOvtAkXtCMqKKpIxHYuuExComc2HRPEOVZLRFZM02KDqGmVdYVkTstpH7EwcQUfRLHx/hRcqzI7GvHh5yvt313SNTM/RmR8fUSQb7lcZHTWGJrBN91Rdjlg3uO6QYOshCTVdLXByOkVA5uFmy/llj7JK2C5X/OlPfkYcrpiOL0jKkPvFCk11uLr5iuPjYy4uptgGnD0/Yf9ks9guccwBUVKCUDJ0XShLLHNMKziouBh2Q1TsyIIEU1Q5Gs2gFGnbFt/36ZqW3sChrUp2m0cUw+RxE6GrDbUj89effk0jgpyLjMdT8iT8w53o1SFlWeEMhyx3T1ycnRDlEY5hcb9+Qrf7ZHXFd/c3CG2O7pg0okTW5FC19Hou19dXjOwpiqhSZwVS1zEcpuyXV5we/4TRaMDHT1dIuoThWJRRwXTSQ9c1NB2aTsAwNY7OjtltI66vbzk5OWO39xEFmbIs8X0fXVHpnw6pmoxKqCiLlDLOaDuBve8jijK7IGQ2GBPHAUlYIUoKklyz9/dEcUu/3yfPc7K0QtccbNcizVqCYHN44U1j2jrHNQbUjUfRRGQ7mbLM2HshaRxxfv6Gvj1HV1yq6p7JzEWQW5arPa4zZLcpCKN7qCVevHiBKLSEkYht5RRFxnw4JK01vnv/Db3BjP7RlM1yw9FoiJIL3C1uUdUeF/M+5VbFGduIwhZFyjg57fCXDWktokg2uzBmfjRm7Ji8u/1II5Xs9hFlLnFxdoIk13z3/gO73YbB2MEPS6q64TGIQCkYu0cUGfhJRJSlWEqPQa9HpTekaY4kCXibABGZnR6QJR7roGI0nNCKInWyRdINSqFl73u41hTDaqnygLzoME0FEYc0SXlq3qJqBXXT8entR8bOBZrqEFc+qtKgmy2WYbHdlSR7H02UqbIEW3MYTkekZchqE3A67qEKCpv9I0EcIUkSX/zoBX6wx098ilJCVQYo0posbWlFAc0RyXMRXVfJ0oogfGI6nTPsT3i4XSKKLobTYtoSYRijNB5n8xHXN2+RJZPTsz5REWA7OlJjYDo6itKiqCJllWEaNnkRs9/tMQsBFJGebRPuanoDhbvHHarYIucqoiMitho91yYvPZoO3rx+RV1lRFGLbhsEXgwYlGV5KCfNLSxL5fFxSyN2BN6eo+mE8XTO1adbBqPxAXAvHEgcTSuRZhuSKGUynDObuuSZwPHxMV2bUiQCph2w2z3RSSVVB5Zt0pQCtj4iznc8fzmjbjLev33EsibohsZm7fP5529YbT6R5gk3n1ZMJ0c8PN4zHo8RBY2HzRXHwgRNSCiClNmFSRgWaJJImQi0Wsg+NLh8fkEpq4RRRl3vifICAQvV6lA1jU44sE6nM4PYC3DUCVW7Jw50RMdB0seswwWimqHICUKn09Qygb/m9PwMTbComxZdk7Ct3h8dKP/oF8qqFXB6NqZ+wX/xX/xv+V/+Z3+f12/e8Hf/hz9htX1gH98TFwHnl1MWyzWSqFOWKd7epxNybq/XhH5NnK6QlQZZbTh7LtC0h4si9BLaTmS1TvnNb75htdrw+999xPMSLLPPdltTFBbPX53z+LjFGRT84IfP+PGPzxgOFXa3OU/BNdqwRBeBVEIWGmRJwLEHvPlySotPVReMR6fMxhNMrcM1De4+romlFtG0yeSC8+dDPrz7jmRVs13k3H17hyqY9G0bS9OwNJ35xCVPKsROYjy1CMOELEnpOwNURWE6neL2nQNayN8zGk3JkgTbM
nEs+4Ab6BoEgQMrzlC5u9tSFAGz4ZSRM8GyD2OWTqlYb3fI2g65ga57QDNqOjlEMQKyCLJij6w39IYWlmWR5gm3N3fUlYjbn2GaJl68Jy5iBEHG1GXGgyHT6ZSB61AJCapc4ZoGLTL90ZCuTHnz/AXPX77AcmDQ0zAVizBIcAculiajKhXDXp9OOGBGBAmKImM6H4HI9+xOA91QqTpQBAnFOBh4hrbAsKcTJD6d2LF42iM1DUfHY6IsoK4j0rzh0+1HmkRmZp4SLivCXY2uuAyGU8azUwaDE0b2iNkZvPz8OSezH/M/+5//Xf70Zy8QcgdDHxL5FUXaIEkS/YFFUcZYtsFwOCbNGvxgQ5FVOGYPTW+ZznqkaUsUNTi9Caar0QgWrz6b4ZgVYi3zzS/3uEYfXddpqhJRFFmuN2y3Hr///e9p64rB0D5gqPKWNCvYJXuytmS5CdkkFcsiQRkY9M1z0lTFMHuHgogiYhk2cZwSRhG2ZjFwx4zcOVVe0XMNXr284OH+jq5W6E+PKKoWpVHxdxtWwRNR4OOFGZZ4RF521FXB6dGYKig4HV5SxECnEucFtx/vKLMQVW0xdIXZaIwq6myWO5oyp5Ia+vMRolLz26/+JWEYoisq5ydjyiJku92SVhv2uwBDHZBnNXQdNCmJH2NILn/nx/8Rx8enbDceQRCgaCJ394/olszrH7zk22/e4thjrq4/4kd7kqLFT1Nmp1OERuZs9JLQb7m+fWDlbxDEkrhYcHLm0BvPeTk5oc4gUxq24bcIXUwYw2KVUAUprmwTrnyezc94uL2haSpsXaMBHjZbdss1ZZAdrDAt3G82tJ3B2fQzZEVDUh2SPP7Dias9SCKIApLaUtYdjZCzC3coukGW5+RxgahITIbH6JrLLrwhS7eIoo2kSjS1hG4aJGVHVpeoYkLudwhthVDF5MmWNy8vKBODQW9EUVSoqkpepFiuQydDLXRESch6t0BAoa5rurZEliRENE5ml3hexPXNB7LKY7NZHJrdqsXeT/nuw0dW2xVxFtCJ0CBy8eIVk5MxG39NK3bsIo/fv/+K3W6DKIrc39+y2lwjSCtMy8M0Q3Q1R8Kgqnc0TUAQPLDc3bAPNxRNzGZXEwYFu33M/eMNdZOy2caEYXuwR7UJSbrHtofMT2ZUdUpZZ7Qk7MMVSVKAolB1cHb+jNloThM0uJ2LWtsoncXJeM6bl6cEG48uzVDY8+VnY54d2zTZE0Mt4uVUohByzi7nZFXB3cYn9DIU/QizZ6CaLVe3N7y/vsJ0ZKyei+eXyIqBOxJohYAsLViu9xT1Cl1SkFqXho40KthtUyRZJ8trjo8umM9OaWsFZAnEGlkTSLKa6093iLhUqchf/MXPGI1bhs6A0WDIy+cndGVLldYockdvINGWEiN7zMQdIRYFlmp+/2NdJatzPl5/e3h51Fxk1UFWD2pMbxMitTKWarHbxcRZxHg2ouWgYf3mu69Zrh8oygPb1NuFjIanPLt4QdvWqKrMcNDn7bdXNHVO1ZRsN3umgzl52KLrOnG6Q8FEU00gpcoLjqcv0E0NUWvZ7X0QFERZQDFa6rrlxbMvUCUVWRRpK+j1TVRZQhA6FssNR0cn5HnJarsBSebZs2csn/aYjglCjiAIWEqfcb8PXYamqTw93WFYKm3b0u8PGA77bHdPfLp+i2WZ+EFMf2xjOTZRWFKUGbZtstsGrNZLYlIeoy2VUqOYArsgpKwjiiSgLA5rYFESM5wMaRE5Pj1nOJhAazCajlDUlrJKKIqIqkkRJSiLhjgqyAuVJFtyenqOoVvMT6ATAwb9KS0CcSLw8//gPyROM06PT5jNJEI/YHx0ULGKrUSadYznQ7b+ArdnYRgKsmSiaSBJEQoKV+9uqJOGgdlnt9yRxRW/+9VHdMni5NkpZk8nyRMk2STNSlokkrpBdmwG0zOyOiGpPPIyIm8youKPN+X80YFyv5d5//GG33/7e/7RX/0r2i7j+HTAw6NHq/po0oCBc8R+u+dPf/JTXj9/yfOzV1jKAFsZH6CiZUwaSzSlwd3Nhv0u5buvF3i7nP2mwHR0ZKNBVU2++fo7isbD6fW5/hRgDBvc0RG//PoXjCfPmJ6abLwnOiHD1OacTI95/uon3H8KsHSFTjQoqo6uVknbitX2Hsc4YmheYCCxfnhi87QjiwrqqkCoLZQmwxjKfHz/gecXn2EKNopo8aOT12hmRt22bDYFlj0mikuCcEeHxMPqiiJOeXbyhqKq6LQcpaciWQ1h0KHpkCYleZ6jmxqGqZHkCV4YcHJygu+FJPsER+nTc1WyPCAvK8omIyszkFrGfYWXz36ELKv0exaudrCcmFqfsm2gUUiTGkmAqkjpuWPGszmDaZ9tuoWuRhJN4qCCssJ2GrykZucXlNWOLG3Yr3KW9wmu1aerEzS5Y7teEicBqqqiqDKzkc5wck5WZHz+YsR+E9M0EaOJxfRojqKqZGVA2cTouk4Y+MhqRZMJRGmHmTtMjBm5LNMTDcogR1d7iFLDbHZEGcaIksrsZIqmaOyjLfbYoLUrlvs7ii7B6ZuM3R5i2RDtAqxSxLZt7LnMh/sHguqRx9WaYF3wP/0f/yVlnkGZ0rctZFFltdjxeL9BRMDz10ymNqPRCNfp03YFVVVRFw2qInE2P+N0ek5TaczO+0RRx2bt4cUhr3845vn5S+I0YDQxaTuBtrFpOoNWatknT2SlR91WWJZBJoIgSwxNizaticOMJEloupI2adEkleVqQxCFpBE0ucJ2lZIUNWbPIq8bnpYeedlSlBlZ4dPrGxzPJziaQVMcvLl+kLIKQlzdZLtLCDcB89GEYa9P8j325frmI0fHY5pK5uG+QGxBRmTzuEeioEg7Pnv5jKP5nCLL6A3GtFKHFwY83ftomkZNTtvWPC3WFI2Aa/d4+XLMZDAg36ZYgo6lTmjKiOP5Oednr6ABzwvIqpq8qSiajk4SKboOP92AIFC3HYZl4/RMJK04oLvKjOPZBZpqcTIZc/XpI0US44fJwVveFxmOHO6Wj3h+yX7f0mgF++qG1WbJZD7l9OIc3bB4+/ZbkiKhECrypuJp47OJMgRUqqRi1NcQ2pLLoxlKAtnCw9BlNotHMj//wzlxzyAR2S92mLpBmKxRBJPQy3HtIeOegyPLWHqDIUlsFit0TWLkOCiigiCqGLqOqAjs0j2y7FA2KqunHmI7w7J1kmzP9c17kjRgs39kMppSlBnr9ZKszKialqTIubr9RNsVtEJBXiYIHXRlSx7lSIKCo7mkUUoaRzTlQfuWJuWBJdm0yKJIHEZstzvW6xXv3n9NkkQH13cnUdYVaR4RhBV1o+D2jkjjg8+8yhVurnZcX61R9QZZqdBUA9000fSWIAoZDGf8nT//Ae5YxDA0oiRFtXRUzQBRQjVF0ixgONZIkgLTtlmsPTpRwouXSKJG28DmYU0Rp/QtE/KEngyO0RLmS0ojRrcNvNij7lQ0y0V1Bry/TflwDxj/Prdb+OZDBILA3/zNL4gTH9SY4+dTcnGDrIl4UYxstqBmrPd7hqMZpm1g
mS5VaaEzwRCnOM4MUWqRGxVNMFDMFrUn0QglZRVT1BVJFTE9dRjPXKaTCTp9Ej/meHLCq+dHhwlRkPD1b65I9i1t3ZJFFYoo8ez8OaqWMBwbRH6NIuvsohWypZKIK0oSRCSoQKlGHI3PqQUd0Shp5ZTR+JSmFBgPTD5/foyY6yiySRwVgIQggOM4HJ++pMhU/H1FW+k4jkNbNfRcmT/56WvmkyPyJOVofMHR0RF5EdE1HWKlo4g1SbxHbnusFmtO51OapmIfhIxHcxRZxN89kRcxsmJS1Dnb3YqmbWkbEX8X41gup8cn2KYNtPQdg2n/mKbNqEuFZ+dHmKrF3f0NRV2jaBJhFCAikEQe+80WXTNIiwjdHNAIJZ0UEYRbtpuIQe8UXbNpyOhaAVkQ8X2fsmhwLJfA3/H6xXPGvSECImdnZ9ApqLIDask+9NlFG+5W99w9LVnvPX7xqys265JPVx5haKJoY4ajCfePH3H6OmFY8/VXVwcrlQSKLDCf9lD1iPvbO15/9iWO45AXEXXb4vRc9uE9y/Ujbn9O0ZakWcPZyQlSJ/Mf/Xd/zvzZCHdkoToNztAliFKSYocoSui6RpbEdF3H2dERumSwW4do2gDN7DM+HmJZA2TRpK06lg+PLK/vMESLYJuDCJvgiqj0KCiRVAVEHdBp+KOY5v/NAmVSLRgdOez9B/7mN/8fvnv/L9hFN1zfvefm04IoCHEtiYE95P76E6G/J/ETjqY9qCvSNMUyDM5OL1hvnqgrCV2ZYBljTFtgdNKimgKNkNN2Bkcnz5ieOTRCzPyipWhzvn73W/7iv/0TdBd++6sloQfrdXZA44z7PP3+gYnlIisufXOI3tkIYk5ZJIRVTCW36K5IS4qiKLx+/YpGynj+xUt+/PqcyXMd21RJM4u6VkGvMJqMuk3w44ZOUWnkkKjcstrkWEMT2TBY72Jcw2G/32LZI0RD4Gb9NWVtoegNvucdWu7jMZqmEEUR0+lBv/a4eCKMI5RCos1LhK6lIMbPg8NTuqvRVi21oLBbLRgqNrVn4QUhraRiWg6u3EdXZaaTM+LoMOJCEtFNFS9eE2QZbafStRKmBl0hUOYShVDSqAGOMYRSQ1Qc5scnNFmG0irUlYGsDon2PpY5ZDAYsH0MEQUV3dQYOxckUQtSjCRJeLsUWXJQNBlVlZFEnSSuqcuCri3pwpjJ6Qinhr5p87TfMJqMcXWTtm3pOTZn83P8KCSrfPKso9NqOkWgyGrUTkYSRASxoaky1psnTFtBNzv2yT1ZbXL/tOf91bdc3X/H2rvl//5/+8ecn4lQCwgdrJ4eOTuZc3E2p+0qppMRy+USzarRTJnJtM/x7ARDs6mrkDzZ8nD7W8oiBbEjq5eoyojnX8wRFY2qDhGxWG8CVENCtQvOX02I04Q0bVlvI3rDEWme8OStEVDRM5PzwTEjU+DVbEy6bdENBbFuaSKFl89e4ScJrZGTVXt6DoRVSNT4tEbF+cvnyPqAolIxTBtBqdC7Ck2ViQsfVJWNVyKXMvOjE2S9QChFJoMxnu8jyBb7sEKUNaomxjJq5qdzTGeA5cyxhha90Sm7ZMn9aoGfVQi1yH61oSt1xoNzDMumk0oeN1viSqFTJLq6QahthDanr2YUUUJWNAx7x+TZjn/2L/7f3D5+xB0aSGqHIAnIhkKYJgcnuGUTxSWKIhClPtudRxj6nJ5NGI3O+Kf/+F8xGQ4Qm5QXL4/5yQ9+jGE77IIIV57wr775BcMzC0vPQIypdQt3fMqXP7zk4s1z9L5Fo0qYfRfJVjEGJmvfp1FUTF3GNG2qRuZxt0YzDNIkpKpTsHP8XUkabPny8vUfjiu6GEIBZcV2neEFPkESkrUZG29HlHnf+8599l7KcHxKWmiUNZRNjqYPMFWBNo/Iki3LTzvulylWf4rW63H9+MRmWxMkBc5QIUhybGuEJGoIYkNa+qRViBfsEUQJxx7SH1sIQofjDJhNjjiaTuiqgkG/z/H0GV3Voik6aRix3SwY9AyGfYtBr4/UKaRhShTuEam4+vieYW9MkVX0rB5np8es/QVpuWcXPHJyMafX66FqEkenNq9e91H1mrz2eHy6Zb2o6fePOT2bk6UFkhqBGNIgcHb+kqPTKbrTIms5lq2jqyPiqGI6t/D2CUXeUlYNPecZs+kp09mYi7M5vb5NVKQUbcnR0RGmrHPcG9OEBUWesF0GyHKBqitkmcbGT6lkg7BuUF/8gNGXr5nNjukPTKo6Q0bjw8MNZd0QRx11J1K0JffLB+IiJIh8+sMefrhA6SSaTgCpwJChyhrunzZIukyRhXhFyS7c4/ZmdJ3GJvD56t03XN28Z+gc8e//h5+h2iV5UaMYNm+vvqWVWhRTplEqstynawT8fY5jzUHes9ttaIoepi1iigZSM6DXm6DoBrPjPoIWkVQFrQmnxz1UWSRLIuo2YDp3GI+n3N4vGIxdmjZnNJqhKAqyUVBUHo49xNAG2LZJEK6ZzWYgFKwXAf/sH/+O85NLJEFBlCLevf8OReozmbkUhYci6lS5gKUPcE2D9WpBnNQ0Qs3j6hMKGplfMx1PqLsSUemQFR3bMfD8DcPBFFEUSdIA2xoyGPTIgxiahiBMsRybtmjo6gYvCBjNdLIyo+8eY5ku5xdD2i7H0HuomoGklZRliWvPyLOGNAvRjINzvs6VA5i+aPH3O6oiJ4sqYj+gLEMs3WVsG/ibBVmYUWQy/aHDxk8pUEEfgWRiuw6el5AUJUmRs9lkRFHD9c09VWVgmibL5RLTcJBFBUmQ+NnPvkCS9xSpjusMuLu9pUhsjo8usXst+72PO+jz8dMVqqUxOR5z8WKCLIuIQou3D7lbRUye2ewSn9vtA7lcUikiS9+jVWqqzkRSK/pjFVnvGIxtWlmhEEsu3gyI24z9akkR55iWgzWxaJTDyD5PBZpCJCsyFo8B/g7qSqDtCrIk/XcfKH/0ozGvnp8yGV0w6B+BaPDuwzumsxEnx3+KLDpMp3OSSCDOUmoKRMUizWryRKXNhtSlQlVVqIZM0+VE5YZd9o6wiMiVmnVwxde/qggSj4YMKjgan7J+akn2OS8/O6MSW767usIdzHGGKp2iIPQMrq/XTCYj2lajKFK8/S2GIzOZHpHvGqYjlzBf8v7+hrfvVmiSQNcZdLrOJtvyt99csX4SWC1Dnr+waVkTbANaVFb3Id7TBkNoGdpT9tsEWVVYP8ZUqc9Q7aMUBm9OfkTPcVGqivnwlJP+lMA76M/yyGc8skhjCdPqsdlfcXX1gbbyycOKZRiT5SVFLhJlAmFREcYRRSIS7HQsUUMRHEbzAf2pzXQ6ZtCfst6EqD0ddzgiTvbIes78+IQkSUjaAj/rGDod/Z6OLJQIjYRiqnSqgCjmSKJG0sBnP/6cy1cTtts113e3CFqDpinUVYFmGiSxT5QWFEoFVcnJ8Ixfv/0VhtNiOSZtrOLoPRQDdEMlTWIkQWYwcmllmdn4nP7oCE3raMSOJG1p85Iyz+mEli4VEduWiJw6qyjTjiKLmbpHiHWLpQuoroJpO1SJQJO
3zIeHMkzY5jStwu2HLev1msFYRBc0fvKjL/kf/f2fMT/9jKPjc5bLBZLiHBRxdUmW1qimxPmLF4zGM1RVQpEG3F35rB/WaIJAnHiYrolGSeeVTIZHuDMRUxHQZIl9uKeut7S6SpTlhGHN+nFNV1R4ccA6XVN3OusixulUsk0OVk5cllSljKx3iG1BmyVsnnxev3lGXgSoYo0qNKiKgKYPUOUeVV1wenYEnYLtgCw2xFnMw2bN4y4jC3OSvGZ6eoxlq+gXLk2TYmqnCGJNKXSIiowktpyfWawfH+npNqoGVdwgI/Pi4hkKKlmaEOxrZNGi79pstj49U6NtAgwHyipAEBSKQmLSExi7ffpujzRN2WxrtlFHVWaQpywWCz6+u2PrB8iOROhrJPuUPIkh13DlPvkWiqBksbxn5RUIqHRxiZwYlJnP7fI9J69PD+5yb4WSiaSxgCWWnDpD0Bq0TkIZaDiKzlgzCFOfyVDi49UnNFfmcbWl7BIko2M0m7NfrEAVmbkqs4GBomi8ujiDsEBqbbJSYHqkk8YicZLh9gyypvjD2Xp7VFVmMFIxhBhbcoh2FUenA9I4RoolXAvkWmUyP2MkKfzs5edMh0fkyR5Z12klnYfdAq2e0VQp3j6iEzqqWiArJPZBjCSrBPuEgXnMp+UnNvme++SJoAzRVYsmLg7GlK7DVmwsVT18nmqVPIkwtD6tkGKPOybjZ4R+ROAVnJ1+wWA0QxJF8jJHcjSc/oi8FokT0JUZnucxHh3R6/UoEwupU/D8NePhkPu7R3beA+vdgvdvb9l7EU+rW7K0pixLhmOFKpf48PFrnpZbluvgMIk4d/ALn28/fktWrumagrdff0AzWgztYNpxXZGXL0+RBImubbm/WxB4OappgSJQVjVhnnK7XxHUHTkyPfsUwzURFZGulamKkLaseH56gtyWRP6CeLfErCvKSkQ0JBRtxO+vf09RV+RRTlEUdNIeRIH50TPGwx5BsKbIY+hkJFtAERXqWEAUK0xtzuXzIUNDRPYHOKLFxL5kde8hCwF9o6XNZZJE5P3tO3792285nb2gTPY0ccGLi2cMega63aFZCvbRGONcYJfe882n3zG0z1E1ifFMBWyqqkJRU3RBYrFdo/cdbN2gUyOqSkBzZHb+mvnZFHSNXezTCTW7XULRlVi2xj7wSVIRMR/Skw12i1viqOLk7ARBEViuAyStz+T4HMN0WK02nF8c0R+5vDh5BmVJU6lopkzPmPFs3McxWnqTCbqqcHk6Z2RriIJ/IIU4B3ZvlxbsNjGmpRBHBYalEWc7WgpESWK7C0mjFlU3aMUczXIpi4bBoIftmoyP+3jxjqpOUc0dhiWy3yfMT47J65i2Brk0UGSTNIqROwXDtNlsVigCpHmBoYg0DZjWMegFz16folk6aVaQ5gmbbEfV2oRpRtdlPFzf4/TGB/MaHU3T4QxMZEtnMD5B0cbERUJeNuz8HUm74p/+86/RbYnJqYnZM3jc3vHh+j2t0FILGTvPx9ulvPhsQtkm7MI9iDJ1qZL4e1aPN4R7D9t20YyO6YnGtx/eMZnO6asG3nKHWHZEmz0j20KXJNTaZiR3OJqOVB26K5qtoaoqXdeB0HAyHNEbmNBpGLrKdOBQxx1KJ6GrGmJ7jlhJGI2BYzjUXczReACN/kcHyj+6lJPHLn7dYGgy7hgE6bBr1O8N+OabBZKY89tfvaXKWobDGUPnFKr9/5e2//iVJk/TLLFjWpuba7/63k9HREZGVmZVV3VXdU/PNHumh4MZkAsOwMVsCO7mnyK54YYbggCBIcjWXaIrK7MiMyNDfPJK125aKy48UbVNAsXFD7iLu/CNmb1m7/OcQ5Z1VEWFJNWcnXuUeYemZ0wmYyZjgfHogu2qoSxq/H2JO4TPPz+n7XJUaUZ4iEmzmH/6z3+CIot8+s0dA83D8WwWpzd8fBciSxH/6A8/47tvEn78M4VaKLm7q5mfOHz77bdcvvLoIxc9zRiPaxJRQzIVpoOUH/5tw/zmmjvpPY0gsfdLBrOAsuipSrh8c8Ps5BJTMWj7jIfHR569OOH2bknT5oiCjmNaZH6CrBTs10vmoxOiJOcQbenrBs+cc9g/UdQQV0sG7pT1psabDmnbln/5Zz/m5c2C2K8p6phGMsgzuH37SNtIiKZPkR+zKptgRwe0bcupdYFqqAThgSIxEIWWydggTyosy6CtSzRRRRVl3r5/h+tZuCMbWZZJ04Q8z4mKhLOTMzbLDXHUoRkti5MRE+cVy9UHpjObtjZI8gO2ZnF6NqMoOrK6p+9qPNdFVgZ4E5WyjynrDEXzCIM9trM6fhVVT1ivHzEsmzLniGrpUsbTKT0tu/0GQzJo6540Szi9uOL7798y9uZInUIeRHS2jmHaJNkWa9zgWAoD94Qog0OS4VkSitYhNgqS4JKEUA8r+jagLBXGCw9b/wxFrOmQWAcHqjKmw0FoIUkymrZit11hWQqqqiPLQKex32b0nURf5cShi+NZPD2tUUwB1dQRA503F+c8PASYs5LNwadXNBYXZ9SpQnX4QN8M2O8jbq6vME2bx7u3SJrDetkyP5lQdinDy4ay9VGlhh+9fs5mU2F7LaYtE+wl8qonXJXE8Xt+9OWM+7slpjzCW7gobQeoVFmH1kDWFfhPEWoqMT7t0eQh6/WSJMk4O79mv9+j6AN061hO8iYtSRrT9h27Q4gsmTiuhaZX+NEGy9OJohJLmwMFoR+SxD4Dz6DMG3RdJ01jEGREucLzPKIoIksL5qcnRFlAU7esn7YIDWgDA7kvKEuJfmQj9S1KLxOlGvNzgbPTaxw9Jy0TwjymTgIQBVSl5XQsQVbQHDpk1eXyxxafPn5DTUQdDKhbg1aomUgDbOmCP/nHl+RpwsPHb/nsZy9Jig1ZvOX54kvirOftN3f82R+/4rv373nz5pI4m1MBSRSgtFPqruR0OkAWG5T079/BW0nmw2bJZDzHPV/gKg6//u2vSMPZ8Z43Nthsa8o6I0zfURxazm9+xte/fcf5+QVVEZKlMV1jEkUSrz7/CfeHe8Jggz29wZAdlukjvdGSpiVtWRAHa5KkQzME1FYi2WeMjVN0VMrKJ6VBV1UGU4GhPUJSeg7+jsFoQt0U/Ognr/mn//w5/+7f/JybVwaHfUWbebieTH5/ICwjTi7nbLYP5AUMZycE4Zr9YYMkmswWZwRBRJjm1F1KU/UY6hjNDin6iE4UyMoa5A5FdjkcNrx58zmCaJIUK7b7ivvldzx79RJFFek6BUEasTg9RTF6RrbKbrdHlQzqtiVOAk7Oh7TkGLbEw3pJJ0sE4Z7p6Jxvfvsdzz6/YZeHWFpKlxWkZcBkPkPXJywfIz7dPjAYDJF7kdl8yN2HtyzOb3Bsk8L3MdSWupUYaKcU6YokqOjdijRNoYOT2TP8XchifsNy+4SuSpimQVOLNI1KGjeYpgpShCR3tELIH/7JZ2z3dzw8rTEMm+ncRVELHn67JwlTnl3fsN/7KKrO1cXnrNdLyrokD2LqukVEIolD2sZgPJ6yfHjEdaaojktLCZ
1Onmz54be/xVE8FAk6OmLhiICJyek7CVtRWC83NHWNIXYookPfJIzHHv52TRb0eO6c8URjv99yOr1AEASEvsK1VF49u+Tp7iO90CJJKm2TM3Mv6VOVu/VHRlOZ7SrnfDDm028OjE9H+PUW09LpyjGaZpMVOR9v72h7CYSWNDki8zrhCMxeb56YjOekaYhAz+J0QFknHPyCoTukqRVkVcN1e7IswxtMieOYIKgQWoNoX+NYNooucLt8pApari6uybINh8OB85NTVg87DNfENmW+u/3I1fUr4qCEPmB2co1hKvz6V9+jDQssq0FsBAzdIS8zomCL0OVsNzV109GUEXkgcfXZlO1hS55L+LHP1c0E33/CcmQsRycKM/paZjq3OQRLRoNLPKfjsDzgxwfuVzJZU2BZBmoj8eLla8Z//BWOZfPh/dcst3dcXN6w3YQ4us125fNY1MxHczxvwu5wS7RumI0sFguX6KDjtw2pX6OrApohIRYytjFjYBiUScJi4fH0FHHYZ8iyTJHUnJyckZYFstwwmkxIdzVVI9Cj8/gQIwjGP/xAmcY+oihi2CKCkGK7Jj/76f+Kn//N14TBjjLRKZstmqyjqqdEkc/j0zvevPmcvs3QnY6KnKLNMfUZo8E5SfzD78DaNTefG/RVj1BC/jv2outYqFrKj9URf/qn/4Kvf/E9vfAdX355g6I6fHi/4fbjI6qpU0yfuLwc0Ss2+31GU9f4QczwdMzIaRmcj/jmFx2qOMEY+DgXJT/82ueLP5ihOyZ5NsBzZUzNIFpqOHrJeD6jrkRaGoIoRNMUFvMLDn7Aw92W8XhClh0YuWPEScs333/H4swmjDPCNOPk3EM2Zqx2Wyauy2xkUaQKsphxMp0gig111VMVFd/+5j2aLpCXAr0YMBvrfPFG4/LmDb/49ffHLBMiuqHy9LhClRXiwGe/XKJIIpLrYhgaURKz28XY3hDX0inzBkOzUYUcTVeoG4v7u3t+/JMXCMgs7w8sVztkeYcqeSB0zOYTNqt73IGJIutE4Z7xfMRuFeJObWopIy/2uO6Ak8kpnx4ecYwOXfeIgpauSbm6PkcUDARUFFlgl4d4oyG3n1bIqoJpiORZTV3kiLLGYDFiu1yiyiL+8ompY9O0DUGacnK2OJa3igN5klKVUJoN9Clp7lPXOZI4YzGb8+HDJwy7ROg63r6/4/TGRJYHtPWKNz/6EavNe1x7jrRVWO0a6q6l6yvCuwNFETEej0nC/HfrzwpBqBk4LrKqQGeA0LFaPaEKFqqpERxSXG/Iw/q4/snDlImi0SQiRQWFmDDwLA4PJaYNo3nJh4+31I1GwYbxaMHD3Z6o2tPXOp7TYlkSUg9ZkiGrEnGUsT34yKpJVj0xGCl885slst4wmUgUpcJhf8tkcYogFvRygywevdy5I+JHIYuxR5bumUzHbLaPVB3MZjMelg8kScJkYNM0HWm24+zS4fE+gO7I+Lu4MWkqBUMdoBkVfrhmv+lQVR1dNQjjDONMIvAbLEsjrwLaXkbRLTTLxjRtijomK3e4A4Pt+sCVdwXKkg4RqVU4eXnCYRlydeLgDjvi8EBbirRpwcBqCeOCJFtz9uySx+CBn/7sDVo/YJOWZFXI1DvnH//shh/efc+BFK3XeHFyiW6OeMj+luXHDc8//4IukKgCn6G1oNMUrkcWjqj+7todkxc1htOhST0X+UtiP+PyykM3B+y2t4iDv0doZNsAXfIQehvynqgK8UYzhC5DEHLqXkNzVZRII00aHMflabND0yEKlwwGVwTbiqwq+elPviBIbxl7NoY2oahKknrLdDpltymo6g6/2tLkCnRQZymirDIZujTVga510KUJceFj6DauO+ebH37OxeUJ89MbwmyJa4mstnfHfGnfkSYVFxcXLG+XPH9+TZht+MM3b9jtMrLLEWneEaQRWZFycn5G1xfsdltevHxOHMd0gkqWpHjjlkroSPIOVTE5OZ1TlhF93zBfeFRtge9nzE7OabMHZMFgd7dFZ4CnW+R5z8n5hJ1/x86vUDWL9XaFY49QdIswqkjzCkH26eyWwz5gPJ3Qty2fv/mMJE/RBJ0o2hPHGePZKZrl8u7D9+iKgWU5GLoDXU/XHtV5VZGTVQmedUrfNdDLaHJKVesMLY+DfxxCFTzaqueLz59TtTnbrKaqBFzHRdUEJBWCIKYXhmhuzTZc0go9O3/HdH5K0XSkeUsU7+n6iovLOUWecnv/Hc9uXrHZ+vjxDlUT+HT3jsXwAmdogNDSNBGiKBDuO9pcIqg/8uzN52zufMrywNn05GgIEhUs06ESSsokwVYcJFklSlKG43PkpkboN1Rljq0bdHWOjMtk7BDscixHZrf8SCse0XZiP8BWB/hRhWxKDB0bQez48PGR0WCCO3eompKXZ5+hCgdenqjML3LmnogwgKeHgiTrkFUD1JysSHENj7ot6SmYzp5T5PC0estoNEJRdB4eHjhZTBGljsenFbrmIAgtWRXStDlda9H3DXHRkFQNRS+yzZeYisbQmPLDx7dMp1NU00YkpSXH9FT6sCNNai5vLjm9mPLtr79mPLbo+5D5YsLTeoMYGFyNF5ieRtfKiJ2MrEh8un/PYjEjziOqBBxnjChtccwxmtSw2XzgsO9RNBvdaNgFG4ReIkkiHh4SHGdAX5ekeYrQFyjCFFXruHl+g3NQWD6s8aYGF/MzxM5GUwT2wZ66irEMl7oQ+MWf/0At+aiWysmzK/L6Eal32DzseLyv+PzHDrd3PkHQc/N8hrTOuLmwedisaHAZDVSCMGLtF5Slz6k+xhvodF2DIPS47oQoiNEVk05oyPOUKGtos47nry4ItylZ+vurF4W+73+vf/wf/sfn/Y+/+pK2smiaDt0S+eHj3yBpBQ8fe5Kg56f/6IIyM/nw3RrNynEci6qWaPsOdZCwXxUU4YAf/1FLWxjsDncgiZj6hMG44sP7O1x7wU/+4DPSsDo2okcd332z4uxsTpxXLO87/qt/9qe8/eYb/q//l/8n/9P//Gfs/Iy8AUj58O4HXr/4kroTaanolJRf/nLNs2ceeaKgoDKfyIyGE37xq0/c3Fzz9V+/5c/+8UuyOmG9zhm4CxSpwff942pA3xPuW24ub5BlkbTIieOa1WrFaGhj6S7rwxLXsehKhY4cy9WQFJUk9RGxkEWJlj0Dd/Y7j2qH0JvEYcvT446hrZE0awxzhCSaFEmBJvdMxxplodCQ07Qwn5+w2uxJ0xzaI7pJUxX8LCfNImQZilLBtl1UvaWqCtqqpEViMnXJE5mq7Lm8HvPdt+/I0prF6RBdFajLFrqjo9fQOyRRQ1NG6GZP19QUZUfVSqRNjiTVZEHG5dkLbh/eMzJP0C2JMI2JUp/r62v8XYTrOLRdgqpr+IeErhWZnwzYrjeYpo2kHkHVsiyyXi8ZDwf0fY+umWw2B1RZxXEckiyhFRO6VkIVHPbbgNmJiiybHIIcZ2QRRj7eYEKefcLSLdrSRJBLFM3BMhS6ruHF6zd8ePuBPCvpBZlDENKJEkot0rQlru2gKQZhkNK2N
aoBPRWyrHC6OOPtD59wBhZir7Ncb8jynIurGYqmMxtbvL29YzDWqQKJ9SrCnmZImGS+hj1L6BuNIpDRDZksLzEMC7FvqPKIJOkwnRpJEslTgfnZhCJv+fRpw2imIKkKVVUc4ceblsXpAMU45q2mIxPL0Enagl2453JyzuPHLZIq4bo9ruLQ9h2T+YzVdkValBRFwXA4pigKZoMrHpffMppYRFGBpnpcXJyx2i1RVZl3b2+5vjwjiwPWywxV0XCHNmXWcnI64nF1iyq52I5KnOyoG4HxaA7CESB892lLTg9Sxfl4hOcs+Nvv/5bJeMDuoUZ4LnI+uOJsqPHw/UekvqPvLR63a/7wT39Eug8YTl2eVin3Dx/47/67f0ERi/jJPXnRczbz6LKEWh0gGj2bxx2fP3tOXdf8zerX9JHI5fiE/faJTCipHkFzxkhGyuh0ytSast1/QpE88jJjNh9y/37PdHpCL5Wo+oJPH/8ajL9f/1RRy3a1w5tPkMWCTpKQRAPHUcnSHWVZkuU9bSNjmQ5iD6oFAhrhas1weIYuitgThdFoxl/88j8xd06Zz6+oupz17nsEwUYRx+TNHsPq2G4imkpmfjIgizMWo3OS0Gc08iiqEEmB8ACWrZBnHVmx4+zsnLbW6fsGP1gf4yOagKhqnJ1csV/6rNd75uczRiOR33z9ic++fMO7d99xfnmOgEHXS0hyhSAIvHv3ibYRce0BQbBjNBqRxhmGccRSiWKHow8okorRaETVp/gBiGrObDTksN8fXeBtjjXQAZUgykApMbQZbZUThAd+9OVXHA4BoR+hqjKqoVN1BbQdVd4TBEuuLy5RZAdJa7m/fUen6SiKQtOICGKPJAGdiCqpTCY6WQlxeKCsQdd1Rs4J6/U35EnJxLlClGr2fsLizKMqRSxjTJYFvHj5mn/3n/41umchiwp13tFTMRzPEJWGpobA92nEjNl4znZ1YDgcstmtCcMQy3AxVYskKaAv0XWd6eQEQRLpRYEkCVDkhrPzBe/evkdRNCSxRVVV8kQ7RsXMkKYCrfWoxApbNxAFDdHQOGwCzLFOW4bQajRdjqgpaMKxGJaXPlnVMhkcbUrn55eEhz1FCpajkTQr6sZDFPbs1z0vX/yIqgroChl/X1F1BxStxtTHjOYGvZhTJRli06EbMaJdYA8n7LdbthvoBIMwibl+9hldVxH6e4pMwh5UKJJHEork1ZK2bRmP50RBhGXpdAhomkaUJqiqiCyrjAcL6FoEqSBMK7oe9kHGj//ghl//6pe4xpHnHB1iJM1mYEukdUxdwfn4nLLOUS2F7d0jUZQxPzklSg/IioaiGyjygE4OjpndoiCJSyaTIQ/3Wzx3gKmrCI2MJABqgMwcSW2PsbKspRNq2k5BUAskUSEMchxLQ5RaNFWmqgo8x8VxhghKg6mM2O2X3Fy+RtVbvv/tO2QFXMtDljRGEwPLHvPtD7+hwwXlnqe7mvPTKe5YI9pliK2AJJVs11BWMrMzBaQGx56QJzW90oJUEG4CkrhGVkHoKxAkRMlkt9twejalLCuyCIqyxXVtZEdkdZdR9zne0CLxj2SUd3/1/vdq5vzeGcp//k/+d6iSSZo9gZjy//lf/pzdeoeq5Vxfz/jpH70gTQq8kY2gdBi2hmHr+EGOZPRIcsdg3HFy3jPzXkHj0lcuJ/MRwS5jNjrlz/7kv+SzL274+P49htmQJg33t5/46R/9hCyrCOMVn/1ozndvv0XSRP6P//P/xOz0BN0TORz2HD5V/Kv/8n+LKp8Q3vtsnh5YbVKmzhhVMYjTO24uBb568TnZpmI+GGEpOs8uZkgOnJ2d4Q0M0uKekhq/yiilNfvDcXXn+z7rzR3r1Tu2uw/YrkTTNGw2GxxziNh37Hcr8jRBaETqrEHuwNV1TF1mPpkR+QFdqzCdaRiGyNDzuLo85fXPhozHN0ePdh2xeDajMVtazSWIK5KiohMlNoeAqu3oZQV14CJoCoY3QFDAHFjojsFw7KFoR6Wj4wwQFRlN8TAMg7qNcDyN9x8/kGUVp6fn2I5BGBTEacjrN59TFi0Pjz57P+IQbfD9Ix4prxOGowFqqxNtUuYnC5I6RjUdZos5st7iODJxkLFaPSHKOWV9wNRlmuaYq5K1IyBfElzyPCcIj7io1XpL0/Wsdj5+mLLzI6qypUekrBqqpqVrj0wuUc84u/DoW5W+UXAtm916j9A35GmI1C8ocwXNlMiK/jisBgWHKOev/v3XZGmJNRiy3sUIgoAjWBi2hSTKxzhAnqBpGo4zQJZVRFHkbL5gtz02ZKu0QpBThkON68s5o8GEvhf47dtvUYcWT0FNrkpUao5suIiiiawUyN2cIpWYzGzCMGa5ijjsY9I0Z+JcMV2oiKpMmBWYQ4sojinrPbOZQZmIlHFMuq/ooiEvr0+xVZliX9ElLeHKp4giNg8PdEmLUAh0dUVfV7RJA50FvcZvv/meKMpQJRVdNsiCAkNyjlDp0ZTdKsdf1wydIfd372nKjKe7La5hYWgNTSGwmE44ObewHQXdkrm7X6GoPaIIpmFjmDbmwKaVe+K84PbjGmsioHkK5+cL9N5l+W6HrnuonsKzN2ecmmNMpef204o0h8ddSNo2XH++YDidURQiJ94J7b7i0j0hXkeoqopqhpycqpTdCnOsoKoeaRLw+uUJimiw2foMnQmX4xMqQ2V2OcTVBC5ez7DPZMyBRSWUrLcfEBsdd2BS97BZ+wyG+rGIVZUIYkzd1UhJ+fdHEjh7dYHiwGO+xnJcijolDg/EfoVtLBgOh7gDHUUVMIcivdSRZiW2baPaGd5MpGhr/vabr3GHFusw5OPtr6BsUfohrVxhLRoMW6JOe4Yjk/mZRV7m9HJLQYFkWkR1RCWI7PclSC2qnVA0e8aTE/xDTS90JMWGMN+TNQlB1JMUDb/54QfWvg96xtP+kfeffBaX52x3BWkOHz6sWa33bFZrokNHlsg0dc504hEccsTepClFdG2AqupkaY2pOdDLPDw8UFY5h31AHO1Ik4iwrOi0BtGskSyJIM3I6ZEdhU38RCEGJGXIeDpi569o2hjbErDNIwqmPER0Wcl05KJbMo+bRxqhpWk6NMPFc2TyZIlty5iqh6o4jEbHskdVC+RFRNkK6JrJw+MtitYxn10y8iZMFgOswYjF5ZBKiOnlhn24Y7nzuV8v0YctcX5A1RUmixNOn1k0YsbTak9a7WilnOHg7Gg3cxWKMiI6+JxMTlEFGUu3cFwNRe8ZeCa9WJJWOyS5QhIbFLXj+w+/ZbM7iiH6uiGParbbRwyrI0kE0ryjMzIE3SDLS/zDA33ZM7ZdqiBEQIGuoqlL+qYmSffUXUtTyTQ1dJWBImhEh5g4zFAVncOuoK8tDF0h2jZQqbz95pbV/ZL3775FEhsuLxZomoaf3fPx3XuKg8B2s2SX+Lxd73h8cvn0/YCPv7U4rA1kScMbmjw83FGWLU3doaki3uDYbC6KAkWVEASBqjyqRGVZI8sDZFnEMBRUTaape8Igoyih60z6WEHpNMS2Io99mrJgs9qR
RSW65lFXCZJmMRw9Q5McqiImrgN2oc/16SvOLl+SFR3n58/whicIvUoSZWiiRlu0GJqJZRlEccBw6tBSoagdii4Q5jsen1Junz6Q5TWBLyBKNUnms9+FCP1ROeoObAzdxbEGJKmPrtuIkkNWJhyeItabOzpJYBns2UcZ63XC5ekZctcQp0sO+4B37z6y3QVIkkTXmSymbzi50NClljqLEJuO3WPOi2czfvJTlzLNKOMM3/f59ps7dpsUUVDIs5YoCpEVkSxtOfgR212CYljUvQA4BGGKYSqUTYu/DpEkAdc2yCIfWYHJ+P8PHMq//pv/yF/+1b9jt9uTJgHPnp+hcsr2saHtP/Ef/s13fLpd8rh8xx//2WfYhothGLz60RhBbjmdWogVTOwTNncRY2vBT350hdj0/OiLM4pEIMm27NcHzuef4RlnfPz+I0VRsdx8i6QUqJ1BcoiOir+RRdCvud9uePvulnIjMBws2G7v+Prnf8X16y8ZOi8YSpf89GdniKS8Pr9Ebgd88+0T7lRmYjmonc/zVxpCp/Huhw8IbYjUyRRljEBLFpRY4hhd1/ntN2/pWwFZMqkrcEwPSZKpu5LtbknR5AwXBcOJy3K7ZzqzEQWNJN6S+immrNIXMLGGkDfItYomZpxMZfIkR+s92qTls5sbsjBFMQwKsSSTAvKsRpV1ojDBNh1MXUdXdNq258OHDyAI6KYDkkAYr2jblKZuqYqeKM7xXJPN+oCqGaRlQNEeePFmRpIHPN7vSPMjj+u773/DdOHx7OYFg8EJTduSlgmbzYayFLi9e6DIfDTVJM1bdtGWNMtopIDReIqujjFNC8tUsO0Bod+w3QREYUoQBGRZwmg4w7ZdDn6Iqjmouk0jgqJpDByXkTNA7gUWixlZlbIKVsRVQlKVDMZjqq7mfrVDUkbcLe9JqhVlWdNUKnXZ0PU17sDAP0SojKmqljjKiIISxdTpBYUoiPEck8lgTFEmJElE3TdH44gh0LQp+/2erlVoapnQL+iFlldvTkFoUTSZ8XiMpij88N0jd3drHNOlDSSKZUV3CLHQePubHe/fLmnrDvqUl69u8PfH8L9jy3RtjaEpfP7TM1RLJS1rLOeMJBQxZAdXn/L4IaKqJITe5Ox8TtfnbHdLZEVE02ug4sX5BZZlIdc9E3XCw1NIQ8V4qKCpNqomkmUZTdMQ+uGRD9tCkeVQt+RFyuP9gbaRODud0DYJSdhg6WMcW8e2PR5uCxaLBaJcEkcVcdhw++mBIhMwDZc0TXlabtkcIsq2xY8D4iRDkCRkUWEwUOhFn7LdYU1hcmKRFwEPD4+0YQulSNdKvPzsDW8++1OMoY479PjVr36FO4wIio/09oHxc4GH6GtEvcMdjfhwvyTJFO4PAdOxjKTGHPwlt9+/43H9HsKEzmjJH/YE6xptfEnQ+dgTAc0bMDCH9GKDY7m8f3eH7ZnomsXjpx2iICMIJsvVhouzV/hR+XdH0xVs3WVgzehzkSKviLOIrCpRFBfHHNLWJeOhCxxv6GXR0fcN9kBl528QBIsg6Gk6kbZt0Q0Vw7Vo5YZObvHcMf5uz26/QlJkNNEk2G2RRZnFYkaS70jKDXXjs3uKEBURRTPYrzugI0m3OK5Okj1hOi6aOSQvdCTZIopTsiIkylZUbUnbKoidyHa1RmhF6HqGA4+mqlEUBdcdoMkOi8WCroWBM+bZs2vmcwtVaRHaiizKSYIG3ex5/aNnOOOe04spi0ud6XxEVu0xLRlDt+haBQEVUTi+yM0nZ2x2KwxDoW878qxAkg2SsMIwLGoyGqnl4tkly/1HsqbC9DziNODdD/fYzpjx4JwyVSiSGEksSNMtcRwTJRlx0tBUClmZIikVJ9NTHp8ONG1P29cESUorVvixTxiWKJqO4YLpdnx6+oGsKrHtCVkmAh0Ptwkf3+/wRipl0WEpJ9iGSRbnR51rc9Tgnp9NGXoOZZZiuTovXl5RVDmiKFLXJXEco8oOfaNRZSq2riH1HZo0Qm5Nzhdz2qZh4p2zmA4RBAvNhunZgLY3eFp+4vrllK5VqAsBRQVJUug7BdvRyPIE25qgqxoHf0kSZVj6EAGROF0RxXv224QsCklijbIpCZItRVlj2lM2+x3b6J7hqcPrz77C8FyWYUTSy7SKSV25tE1BGvpUpYCARN/a1IWBIAgk4YHJ2GOxsNhvAlZPS+omYzabIcs6TdOQZyVxGKLqJquNjyKb5FmNKAnotkSWh5jGAMVuCeInTEfmh+/v0ZUFw+EJrdhSyjscU6QoCrI0ZeCqxFlIFJRIvc7T9pFWANszMR0bQeyPX4KFjqYqGDouSj/ANQbUdc14PEJVdKL4QJYlIGlo2pTpfM7H2wf2+z1xlKPJQwQawiAjCnPi4GiNOuxSmlxEElWyrCDNC+pWomgKXNelLmpWd1uGwzHbQ0Qt9LRiR4fM2DP44vkrLCWjCitcY8B48BJTs9FNjazNOX0549cf/5rlNqaTAyaLATPP40dvzri6mNO1R12pqDQ0QkMrqiRZTd02iIpA1VZ0CJiOTifWZGmO0KrIsoosSiiygYBEU//e8+TvP1CK6g5nIJJEDX6w4b/4F88YzxSqVCP0Oxyn5OS8x/Favn/7H1DUCEkMcCf33LwEUTD47/+H/zWnVxJvPvd4/aWCJI5ZnNxwefmKqmlBTDg7vWYxO0PuTH76k5/hGNdstyn0Is+efUX3OyDt3V3IaDRiee/zs5/8MX/8p19wcz4iK7b87B9/jkLL5dUIZ17x9d98zec3n2OpM9rGRnUU8kqgV0vGsysG3gsOyxVtUVHmApZuoaEzG4xpoymOrpMnGUPnhDoX6RsVSx1Br1BWoGkq7nCEpIg8f/kTttEOzZJ5+/GWvb9muw/QbBVBlTFth6w8oGlDJMXAGmgohsTMviHMVpzcaOhmQ1MnyDSsnpbQqsymZ3StyOniDH9/wN/uMTWVNE7QFJ3R2GW/2RP5MUPPRe4F/E1EGBR43hDTkBlYI/pOpW4EmrYnSHx6CizL4uR8xmi6QBBl9vsdqiFxcnZKFFeE6Z5DUrNc7cmqCmzQPI3dPoDmqNK7W614+/4Hdnsff5dy2Ke0rYwomozGF0dsUd9SFRptI9L1Kefnl3StQhyHSE2HIogomsra9+lkiazK8RwTtWsoUwn6o2M0K3J00yUtQixXo0Xj7HTOYjblbHFCXdRHO01SkhV76rzHUVSapmV3WJOXBaokoss9eZrRCA2qrjL0RsxmC6qqoqhTFE1mu10jCAJlU5GVAavtCsPQSJKOx8ctH949kuYJttxgKxBvd9h6g1RniFXPF88vOR1MsU0FwxLZH3a0+LRVjyK4TKYSY2fCj3/6nB++XdGWIp5pc7lYYFk9eZkyOxszmns4IxPXc7EHCoomEoQhCDK2XZNVKbtDQFtrqLqC7FY0qsIu6lENaNvjUNDUImnaoqkObdtydX2KN1JIEwXDlDHsEssy+PjhE4qiHPE0eU1eF0znE7aHPXcfI5ID5GmNobp89mbBZtngBym+H6EoDnGWkxYxvdJieRJ
tVaLVGpmv0BsC22RLsE+5WrxgNLSp7RgsCUkSefjhLUq3QVN2WLbOaDLAUifcPmzQxga9buJOPPbBlsO6YjY8QcLg+nLEbvOXJH4OnYXlwOXVnLbQCT8daI0W73yGWDWUZYlQmQytC354t6GqbWStQTI6mr5hMBhwOh9zfTNhvdzihymCICHaxt+dyA+4+/YdfdZz7s5IkoLhaEyWStRtSt/HOOqY1ZPPwV9SpA1ZFuN5IzarlKZSiZKYuo/pqpRo21CWKXlds4mW7OMnVo879suStgLdMojiGG8wJ04y4jSiqnsM3cPSLVTVP2Yv8wOdcDQdWdocWekI4oiuOQ4XmqYRJxvKrEUTT/nixZ8iVA6ebXDrf0JyDRRXwZ3YaIaKqgn0vUCRJ3TSgf0+Z7dN+eyLK0QlY73aYBkmhgWjscJ0MSTNIyaLKcvdnjA9xlVW2zvqquKwDcnCDNcymU4t2jokD1L6TMPShxRJRV0WdH3L/d0SWTbxw5hvP3zD8PSKspNwB2NEQcM2bRSp4fJ6QtmEfPvdO1R9RNt1CHKOabfkRUKc+SRZjNiLKNKA6FBS5RlNF9L1DUFYEsYNRVWjWyadIJJlCZJg43nX0GscdjUtBXVds90/kBcxr16e0dQhQiMiCCXBfkWR5EfxwmBM37dsD0u2+w2TkwmCILA9+Ago/PDDO7I8JooPPD2tqMoeU1Vo25Yo7chqAdmR0R2ZrgXLGCDUGvoA6iZhdyjxZjZXz+f8m//0SxT1iMfJ4oom60njnLKANCnwD9ujgEKskSSBzXbJYnGKrPaIaoRjaWwefVShR+l7zibnzMcLxkOT0VhBV3vef3/H+jZD1wYMpgNkzeHp4cBwcErepEhuh6w4bHZrBKHFdWwsXcAbyvi7lL/5j3tG7pzri1e8eLkgjnKKvGU8ssiyCknuSaLjQBpEazRdoipbFEXh+sUF290DKSUfVvcUbUZaFkRpRN4ECIqCpJh0vYzQd6hUFE1K1rWYmout6JhTHUlpcFyVd+/eEcU+u/WGze6W/XZLXde0bU4axii9QZl2GLoHrXKM+dnQtGsOm5iB5eI6x+euSE/X18jCEE3SkCWNNEvQ1SPbc7v0abuSwz6k10SKuudwOBBsfdqyYzLRCOMKxXPpW1iv1/z2h7fcrzZUnYKqWeTZHUqjsV7VIE1oNY1N+Ugpq9zuPxEWNYf4I7ebb4jKmNXhgU7K2O12qKKBUPUYCDjOgJPTEXkRkWYVQRwhyFAWFX0v0LY9ogpRnHI4RKiWhWz8/urF3ztD+S//21f9P/3nX5FFJXEWs1iccnv7kcnC5uPbkNG8RBBV0gzGY4FXL8/IEoH397eUfYcinCB1IhfnJnGQ0jQRjnOGpAwoyo4ofIthtxThiDKu+clXU5JI5/3yHfo4ZjH+DD/d8u5txOvXl7SpzH79yOnliDyX0awatTlhs31LXZeE8QpBNfiwWuEMRT4bvcJwztgF9yhyhKl5rFYVr38y4ttfLdluP3J2eUZWQujLCEKPJGg8fFojyR2uPaCIewQ5YjR2SIuWJM+Ik4qmzWgykSQuGAwGpNWOxcmAOEiZeROmgyneHDTF4/7hHV/+6Blvv19xcX3BZHRGWeXsDxG32wTR2/Lx24Ai6mn6LTJDDLmhVu1j6SYIaeuS+XyOLMuUZU4nwD58QFPGXF9esN9ukHqZupSx3AGlsCPdVyRpyngxAVnju+/fIUvw+etLomiLqnp47pwij9AVi7YRMCy4u9+CoFKnHTcvB3z/6QOt0mEaOl0sIHYtvSDTSi2eI1OnAkKn4Y110qyizGuGQ42HxxDb6Xh+9Uc8PtzS8MRk8oyskmjlmDqrMHWDru4oiwJZPmZPVF0CuScNOjS7Q1F10nyHZcwJgy32QCWOBTxHJU9K6Fo+/+INh0NBUSTUXXgM1gsiRZZg6Rpt3xFlKd5oSBamFGnG2Ysz/ENC5Mc4toprW1RVS55nnJyOaLua+9UBco3z8zF3yw2S2OCaA4q+xNJkmrogzRtEuSbZVoynZ9RCwOoWXrxw2cchw4nA3buCPNJ4/uoU/3Dgq8+eM11o/Jt/+1fEccvA1XBslaJqaRUBezRB6lI0yaCIVAxdIk4TdENm/bRnPlXZ+DG6ZKFaGlVfoNomh7og3gW8HJ1x2ISMpyfcL7fUXYusCJydj/APd5ycjnhaN5wsPB7u32HJM8LIx3ZdahIcb4wgQZGENLnAbHRB4EfYrsDDwzs854rH7QZJKanKjkbQsacem82a8/k5Q8dGsUr8xz1yZ9GpBaY9QBZrBE0A2SYPU5K2IApaLhcLkNf0vYEqaFzMZgjdkLS6wx2KfPi24PnLGTIW46nGu48/Z3LqYgkitx/WGO5nhMUtVVKAaCM6x1yho/eUtY7cl8TCirF9RtrIuJZGXaq00Y5Wy1AHDnN9RrLa0Ukat093GI6LpepI1d+/g4dpgmRoVH2MYqms7isU26OuYtJgz7/6Z/81v/jF1+RtiTuW0fWSJLXoKxdd7CjFR0xFYxPk2LJxLIKZCsHuA2Iro2gWsmaw2yfcnJ8g1i1B5aOoBlmZo5sKRVZiqQptWqD1LkHbMZvqRIctL59/Rp72fPPNbxmdQZrIqLpNHgccNhGWLeGYU3RBx5t0JEnDcOKRZjGW6bDbhgioDGcidWHRtTIFW6gVvvrqKwauydPDmjQsaJuSKFlydn4DvUYjxpRtTFFK6JrB0+ojSSgxWjgMHYOxO+Hu9hF7YLI9rBF7+Ed/8gf84uvv0GuJ6WhE3CdHfZ7iktc19/GK57OX5PEOakDRkWUZXVMQ1Ir3j2uyaImuOSxOhiiSy6dPn3j27AVBvKFvDRI/ZXF+QRmH9KVC2Ymcnk6Q5YJ9lCNLNYIgMBqO+fDxLaPBAm84YxPe8vh0QBQGmE7CbhUy9AwscwRA1+RUObSVQJ7niFKPqks0XU8vKAwGDohHCHxW+GiSjaJpiGpIEuf4q4qL8zMcT2GzzkHtGC88wqCkzgpsQ2LkziirmtatyZKILK5wZQ+pqdhHW+Tc4eTZnMMqpu0y7ImHvy85GQ3wDw/IkkGSNZiGhKz0jIdXzBcjvnv/F1SZyu0HH1WzMRyVyXjGxv8N19dX3N5u6eQCRZ5Quk8Y3RkmBo51zIKvHjfcXF3SVx2VWPLtbx45v54iqSmypOE4Qz68/YBrzvHGOnFa445bnp4i6lJB1RKG7jNGk47/8Jd/y/n5ObLSkiQpp4vn1E1BURR0tUml7KFTiIIQXXTpmoKqDtHMKbo5QO17VPm4+SnFivv1ks8vnnE2n/Fud0+895lMPfabgrrJUYUBricRhQeG3imyUBH5BYIKQbGh71SqOEOWXcRBhitp6MoCVa/ZrhNMR2a9TnHcE/bhHW0j0lYgUHBzc8X9bUjbFbhTka47As4N0yIrU9bLnK8+/xxV39NUc7aH9wjSALqGga1Q1RsU4YS2kxhNBeqqY2IPCZKUVbhF0ASefXbO+28/4Cgml5MLXry54T/9x/83nWSAeNRstm3Jx4cHpFZGkCrKPqVH57CDkTdFVW
qaOkORdfq+p1Nbon2IgIzpjZBFiW//zbf/sBnKl1+cEudrvn37gY/vd6RxQ1503H66R9E61o8wnV1yejbm9bM/pk3OSH2BrnB4fKfyi//8LY9PH7n99MRmlfPbb9ZUhcCvf/nn/PDbf0sWSCjtK3aHPc+/nPObd0t6rScrD/zw7S2rzQciP0DtFcTaJAxDirYEYYggnvDs4o9QHYXAT9juH2gED3MyYTo75YuLP6HodYospUs6tncCdQMfPh341//6LQ+bDbY7oG56Nk8NQbimbWQ01WA0FRCo2a4f6aQYxVGQbZei6wjSLZpVH7Vscsfl6wnuVGA4PsEYjHnz5pqTyYTWyNmvMpqywTEm/OqXtyi6guXYhEnAr775jlrZ0HcRmw8HxFrh7MRj4V1iqy6m5XJ1MkDpGmR6fvTFZ+iaTJmltE1zBBePLpjNjg3etpbRTIvBVKBtQ+6/X9L1PWpvYMgC4XaNljV89ewUmRZDHKAoCk+Pe4qiIQ4rDEPj228+QJ/iODGmoRHsO4a2gFkZTDQDTZAYDocoqozQtfhhR9GZ5G2N7+8JNhsuzucYtkQnNcjCkPXTPUWZoTsXtJpMK8cUSYTRaYiyQimVGJbMyLa5PLliNJogygLOGAx3RE3HdDzH0XVUzSbyM4a6QFWDYJioA5f379aoSsLAKdDECXGzR1J7sibAsC16WSOXcsJ4jW1ojC5miPToGizOHNqmo+sERCym0ymPjwcOUQxlh+UccyqqLGKaJlmR0uQhdVYh9zYmx9jC69c3jNwBpjjg9WuHvMnQqInjGNeDz746pVGf0PF5/XzKN9/eIatwcq5jj0ziqiNtGiRFJwlCir5FLBvEGsLkgU44gmdtvcEdn+AM5kiqgKwJKKKBKbh4mstkMSVtGuZv5pTGHllOOD31OD1X6JoUST3BTws6LWGTvcPUbeajGcPxnIE3OWa5JJEw1Hh++RrZFoiyhK4JKJqcth5xcrag70ouL56zuLnCm1joZcKJJXF65iBbEnW8Q3J1UjVEFxR+dP4KTVWx1Uum+hBRMjFFAUsraaSSvJNQDZEo3LNebRGIqCi4/dSjWRLr9C2nr+DThx1bv6ZVVDb+AFG3qfs1eZ3ww1OE7FhE+5Aw2rMJQjbbW2zX4sRaEG1K8l1DVSgU+dHhXKciGgofPt1TSzLoLYuTE7o4Z/XwSC12f3eaGvJod1wdpwa90LP79Al3YiGKJpbqkKYx5likFiK224rdLkLRduT6BhqDC/c5M3tE3dq0Tc9me0/Xq0iijjcYk6cJmtogygKtpLL3D7R1R5uK2P2IKq6RJZVOEsiEEKXPWa3WqIpDsEoIVzsWJ2cgKxi2AUJOViTQWAynp3hjkb4uaLBYH0L2fklbVKw3S6K2pNMbHu5CirznxbMX2LqDZ+nU9Za//ov/zId3S4q+RBBbLMdlMTrjfvOJZbhitdkQJxvCfIfujHjzxQ1SB/E+5+H+lqJd87j9CIhczKaMxQqzMVB0j7DJoATdGtJIBreHJ8Ync+KioKEiy1p6Kirh+CV+H7V0NVijEY5j05UKUVzgDiYs7+8IlgFVUeMNJtzdLpF0l17NqcuIWogRNAWhAVVU0DWLsuuoaWjoCYOUzcOGtsxIsnsoWpBL0gDCTUORN8i9QRjl2J7LYKqh2x3z8QhN0hiNXA6HHUXc8ri9R9ag7EK6viUJcmRhxPWrUwSj4YdPHxB1uLi4oC9bsiBAkUVETeHT/hO+sCJY1wjNhNHCY5sF5G2JrY4QdI28Duj1AmMks10tGdgWvZwxXrjohsPp6RmSoh591aXPcvdbBu6MNO4YjgakZYAg9OyCBwaDGUWukaQttu0wnkmwkZm7GoosE8c5mmoxmQ7olJR9kVA2OSdXQ2zvaA4LQh9/GxzJAyOJjf9A1aZstgGDgcPpuUfT1+z9WyJf5sXVM1SpIo8S+sJAajV2yzWH9ZoyC2nKijQNcQcT4iTB81xoVZoipyp90mBDFO4pu4A8brEUhbvNLV+/+47M7xidTilvCybWiE4SGM50FKljaCzwsz1VKzOYWPj+nqHmMrBqTNugayWGyhzFUKjbiCQoERsTU7FxBzCwLTxzhNBGDFwVy7JZryIWiwmqKpLGECcNeSixfMjxgxTTVdimaz7dd5RVRleI5HHDaDbl5OQ1A+uG4cRB6EUscY4k6viZT1602KaBLU7QSh2j0RmZOvtNShR3iPqEqmzRxIY0KpDFAefzOYZhkNQlTWNTlS32IKbsNuRli+M4OOaAqm+wNJWqz0HWkOqOKFn/vmPi/w8u7zqmLUcIUsGrL6Zs9weWDymGesZXP70iSysebw+Eh4Svf/OfuH/6NU/rFfv9luXuF3gDlcl0QN13tMLRC/7p/i2qofP89R9QlPDXP/85bQPbdYJ/iPmrv/oLwkOOIszoWoH9psQaqBz8NaYjEuxFfvnLX+NHH7n98JH93RrLGPNnf/jfksUatzsBc/iSTWeRxjpff/Mdq8MSUZeJ8ho/2/Pdh2/5tLyl6yCJesLIp28NonhLnD0i9BaSJHBxNaTrOmQRDvuYJC4Yj0fQaUxGUxxH4PJywnzucHbuUBY7Qn9FGK7R1R5jNCBuSk6eXTA8ndNrMr999wPv7j6y9Xf86rs1q2CLpNsMxh5hsiWII1BKWqHB0rxjJmc6YLPeEYU5VVNj2jpt3yHWDev7PW3RI0oZaRqyWRV0vcrNqwWqaeFMVbKmIi17bl5fYAwcklykk3pEoaJMCiwTRLni5z//W2aLc4pC5+6uxp31tGKIJHnoTsPqMeDNZ8/QVQNVTrA1i2Dr09QxdV3S1AqONyRKYlbLPTQdA89E0irGc422jbn99IH9JqWtHJAL4sSnKWWGg0tUXaMSDuzjNXHSkVUFXZ3gaApN3R0Hqt8B0ItKYDT02G1X1F2L5vbEfoQquWzCOwx7QNmmdJJOUXWoQk2TxLSVgGE6TAcmt093BGlAlEQ0tGw3IbvNlijaUFclD59yZrM5+cHB0SQGukbsR8hah+nOEFUdy52gDSpaAd5+uMPP9uSVhGrbWNIptZST1zbrsCPpduxXCc5U5+e/+ituHz7RCwqqbpGmIaomHCH5okTX5KiNiuF6tGZOVdf0VY+iiIxnC1RTQpJ6Xn9xRVmL/PaHJ3q1peoL1us9pi2gNzpVKZB1HbXs06sZYZKg2Mc3/LbriQ8apjlBdWA6c8hKn0boeXxYke1XZFGC2KgIQkEr1nQo/OGf/DG7ZM31xXN0UcRRTGzJoykkLq7OyasQ/7Bh5l6Q7zO6TGK9zLh//I7FzMDUQ5Z3T+ySR8IkRzdsNEWkiWvqXck/+dmfsosLvnn75zw8rkmKT5hORZlp/PrXv8TyfE4uRJ7e7+ilCG14IDzsCe8VLkczot07Ul/gdOLRVwKWOWS5WxMFAwb2lD/66g1mX+MMLO7v9xQV5FVM03RY5jE/GEcRhnXO6fVPUCX5746pgTG2OTk5Y6rMcDoJbzRAyAtGrsIu3BC30Lcm0TrBUSyeXc7JaBkKElfTCx7yBySx52Ju46dbb
MnmdLxAFiUEKrzB8XcWZUqcLxkMLPK0QOoN9kGAbkNd5uRJzWjsIas6ktjjuh5huubZqws810JqDRzDQGgkhN5hNOuYzlTubw9ohk5eJkeYvCTgDDy225BwlXLqXPK/+e//R/6bf/mvOFuc8cd/8F/x+Zsvubv16TsJ120xzQ5R6ugFgU/Lv0XXVfxdhNxZjK0FfVERbWKCzZY02TKanSE7HlFT0+sCqlMTJSH/9//bb8nyhrzas1pt2Kz3fHj/iH94gkYgPkTkhz2iIuPNJijKMTN9e39HHmWcWh5fLb5A7RXypkJQZGxzgibOmHrX6KJA08XoZkvXisiSiTfRSbKUjx/XxPmWqA64f1zz/tsP6JpDLYmEfUpY9iwWz5hOTtiGawz1hJOzayrBJ/IDRsM5X33xJZIgoMoeY++awyEgCAL8Q0Fdyez3Ptfnz+k7AXoBWQDP87g8PyNNc/K0oSk0yjLkENxSFBmuZaPr+rHUopgIqU0RrulKn+WHgIHuYhsygwGcndtkeUJVCpj6jK7VePvuO/KsIw4E8nrL4/aWrCoZnVhkdUnZQl5JvPrxKYpdMRgMmJznzE8VNM3g9vaegWPRdyXLxwjZmLHxU1rlgKR0PN490lYCcqdTZnvEXsY0IUm2jLwJqqgxmpgMxzrL5Rpds+hamIym5FlFltZYxoiuldjvImTBoM508lRAUXtkpaXvVLK0ZzqdYssGA8NC7hrEvqarSy7OTxl5Q1zDYnriUVcRqZ8idj10OpqpIIkCdAmrxwcaSQJRQhNVyiznEOzpuo626MjLhCjNmc2u0PQJtrnAdRyev5hiOy30MqpmMplf40wsVrtHpMaiy0rkPmQ2PSXPOryRSprsSeKYgWuR+jmuYXF+Pefly2d4zhRdk9iuV4idSNckGNoIz9EYOi4f377DUHQcc4iuGmw2K3RDo6klirJF6lxWyz1/8xc/oMgadSuRZDF/8e//LfkuQShEFElkOhsSHELiMMFwZXRkujxFbBUcYwqdQNuVpEVE0e3I05gkPqDKFm1TINEj8vtzKH/vlff//v/wZZ/EJZ9/+YIPbw9YzoCq9KHXUJSSxfySf//v/zOvPpuhaBWPtxHX19fs0juiPOL5xRWPHx548+xz9kGN5mlstil1nnE1m/Ltx3skWUAURSShRdc7yrLkD776E5K0JM4LntZPDEcOaVKjyw4//tGX3D8+UTY5ZRpjWi6kNWVX8fZhzyHdoQ1durzAFE1UwaBrQp7dzMlyn8e7FFnJmM8mHOIERRkTpQeKQqUXIlQF5N5DUwWKLEHVBiAX3N4ecAcTZLXAcRyatiDzC0zL5uzqlOVqQxDu+PHz1zRpj+LAJgpIkgTXG9D3HR8/vccyTEzT5NnVM/78l99gj2pCv+FkcoKpdSzvM6Zjj76DJMrJqxBv7KIbA5K4xHJM/GBD29Z49oim6Xm43zCde7RdfXQFWxZBFGMYBpYl0LSQJQJNsWcX+FjuHNPoEbsOf6VycqGSpgJ1KWKPVJ5WWwQ5p29zxhObLKuoc4HFZE6VHn3u7z/eYeoSYeDhDEUOYcDp/BTL6vnV335kemKhqxqWZWFZFtvtFtM26Pueomooq47LC5unVYLnnqAKLWUVU/QNndjT1EebjiZCV7d0qEiizMB02W739IqEZzvkVc42DnDsnnFvsN0d6Cc1gjyi9XOGi1OqqGBkmWRtQiPK6JqDoUvsN1uqRuDx/g5bn2IYLa5zRJXUTYpQO7Q16KpM1TTk9QFZceipmU+viYsnPOuK9fYTRSZhmiq9lOLvegZjldQP0T2BzSpGEnXOZzOCpUwmrFA1AdVUjxda31LXBaZuQ68giiJNmzIdDullnaf9ijPPRepM9kWC0LVcnE1Y71I0TaGsdVpBpWg23K4+MRxOUcoaq5RYtQWWJ5NlFX2b8erZF0RFwuruDsOdUUY5tiFzfnpOlqQc4hWyblJkEi9Oz+iFhB/e33N2ckZWpZycXaMLKg+7j8iCTHjYopvGEVciKzjDEZ2qs7zfMMgVfvnN93hzl5PFEKnvkIUZV1dTfv6Lf4s6dFi+jfnszTMUryTPGpTG4E/+0U/52x/+hm+/veXZm0uaqqcsNoBImTSYuk4nF7w+e8U2O7Ct32FqE04HM9YfAvK8xLInXJ5N+M+//DXGwELRh7QIZEmLIMc4A4jThs+fvUY2FL75/jfogsXV/DgUxH7A1eUpjQzbNP37l+xmh6uPeLzN+aN/8hP++j/8FYrVo480ykOHGPUc0pjWBFUTOZldkZUrRtYCoyt53O/ZxgWGmNE0NY4zYzjyuL//iDOY4TgOiiIRBAGyKZIkFYpqUwU9k5nJLr/F0F3KoIS2o+nBsiz6toJeomkSxt4MVVJ4fIpwJ7B5ypDtHks1iMMEy1KQRJUgaQn9HcOTGY7cstqH/LM//EOSfUjS6uRJTtvAZDSkbHxqqaEuSixdoGw7dNFmG0VobgyyQl2JnM8mLB+2ONaEsvYpS5/z2RfERcsm/UjdZ4T7jsVgRB1nXM9/zLZcUXQJs/k5+8cnesOkThJU00FxOoq8Re5rJt4lcZMQJRm2qjMyBwhiiyzL3O2XSIaNq2uUh4CffvHH7Hcx3737JYOJyW5fYpoqmp5SZgZlL2I7JkUWke4SgjDjR189o20F1puAy2czbm/vGU2HZHHKZr8j3Hb8wc/+EFnd88Ovf+Dlsz9iPjbZ+DGrzRPzxRipEYjTiqysqLtj+S4rW3oxom87ukpkthggYrDcPmLqQxxL4HF5S92o2NYQWapJkxLH8ZD0EkcbAxF7P2A6vyFKtkzdCaqgEMQ7hqNzyrIkDGMuz2fohsTTg08a1ShGSFoXjLxzkmxP04johkaZtXR9jmXMOJm5ZGXOahlimjp9l1FXHWleM5oMiMqEuo05bARmE42BrSG2KkJvE8Y+p2czdFvg8fEeS/MwTRNvZLDZbNgfClRVxnIsdLvj4/sVkjDgZDElT0qqqsLQO2RFQpCOrXQ/jLi+OUWUWvaHA2mS0AtHpN14NEOWOzbLPUXZMT+Z0VYFwSbDUE1W4R3WaIbjWcT7EkVIKfsOsZUYjkfomsTHuycsQ8HAoVcbajpkVPL8SDLI0hhJUqjqHk2TkPuavKjRbANnYFMW8Pa7W14/e8Hj/Yq8rbHcGlFq6GuDPG7whmd8+/ZbFlcaN8+esVzmVE1IVVXohozQQJ2DJMiMJzY9IqZs4NgGD8sdnjeiKn3KvqTIJGT1ONhKSsXN9StkLeL9uwdoTDBF6iRE0zQk0+b+dsfC82hbn8flAXdgEcQRdSfhDm2quoPOQFMl2rbGVUXyqma52uE5E+qippF7Nr/e/8OuvJOoRVbhh7eP1D3YroE3mhAnB6oSbu/uuby5IUwSirrhyz/4QzZ+wmaTUxUuT0sRa3BGUovEhcom6NiEEavDio9Pn6g6Ec2YkGcqiupRVDJnJz9mte5J0h5JE9ju1hwC/9jA9R+Iy08c/Ee++/5ryq7g06cfWMYbNsGBLA0Rqp7+KWBYq3RtQNMcqNuC79/dc3sfoJo2
MaAYUyr2iaGseV6duaKE4p8xJVO+0V97qOUkPTNHRdxzCc7hrDEFE1KMuS0WhKnhccDkdG7hRJAtd3OK4P6I5JUebIrUInQC92uDOdrgXbtDgG29Mz2T69dpJkpGlKVicMVYMgSVRtjeNYFEXBw/0T8+UCUQQJg93dDe3Q0tEwmU2RBAVTcZFVgzRPmMzGOLbNMT5iuRZlm2FZGkWZ4NgSnz/doGg6d/cfWO8Hzs8XLM6nfAkidusY15kSRxm2c1Kf111HnGik2YCmF1xenQEFXiVQ1w13d3cY9cmXVpNUZFlGQGQYBMJ9jKXK6JrE5vEJ39cIDj3ffHuJbQnkSU+aphzDClGZUHYlmqbj2BbHxyNNB0EQMJvNiKI9ogRNVaOrFp0goOsqCB22YwEKYV7Qdd0fn8GPzC9cuvrvr/L+exubP/PMM88888wzzzzzzP8ef/9e5jPPPPPMM88888wzz/zv8FxQPvPMM88888wzzzzzD+K5oHzmmWeeeeaZZ5555h/Ec0H5zDPPPPPMM88888w/iOeC8plnnnnmmWeeeeaZfxDPBeUzzzzzzDPPPPPMM/8gngvKZ5555plnnnnmmWf+QTwXlM8888wzzzzzzDPP/IN4LiifeeaZZ5555plnnvkH8f8HKjENS+oXcSUAAAAASUVORK5CYII=\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"execution_count\": 9,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"# show the results\\n\",\n    \"visualizer.add_datasample(\\n\",\n    \"    'result',\\n\",\n    \"    img,\\n\",\n    \"    data_sample=result,\\n\",\n    \"    draw_gt=False,\\n\",\n    \"    wait_time=0,\\n\",\n    \")\\n\",\n    \"visualizer.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"mmdet\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.8.16\"\n  },\n  \"pycharm\": {\n   \"stem_cell\": {\n    \"cell_type\": \"raw\",\n    \"metadata\": {\n     \"collapsed\": false\n    },\n    \"source\": []\n   }\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"26395be4d8bd6f462fe6992ade267d864a329fc5ba918775a7fc2edf93f1463b\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "demo/video_demo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\n\nimport cv2\nimport mmcv\nfrom mmcv.transforms import Compose\nfrom mmengine.utils import track_iter_progress\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.registry import VISUALIZERS\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='MMDetection video demo')\n    parser.add_argument('video', help='Video file')\n    parser.add_argument('config', help='Config file')\n    parser.add_argument('checkpoint', help='Checkpoint file')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.3, help='Bbox score threshold')\n    parser.add_argument('--out', type=str, help='Output video file')\n    parser.add_argument('--show', action='store_true', help='Show video')\n    parser.add_argument(\n        '--wait-time',\n        type=float,\n        default=1,\n        help='The interval of show (s), 0 is block')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    assert args.out or args.show, \\\n        ('Please specify at least one operation (save/show the '\n         'video) with the argument \"--out\" or \"--show\"')\n\n    # build the model from a config file and a checkpoint file\n    model = init_detector(args.config, args.checkpoint, device=args.device)\n\n    # build test pipeline\n    model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'\n    test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)\n\n    # init visualizer\n    visualizer = VISUALIZERS.build(model.cfg.visualizer)\n    # the dataset_meta is loaded from the checkpoint and\n    # then pass to the model in init_detector\n    visualizer.dataset_meta = model.dataset_meta\n\n    video_reader = mmcv.VideoReader(args.video)\n    video_writer = None\n    if args.out:\n        fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n        video_writer = cv2.VideoWriter(\n            args.out, fourcc, video_reader.fps,\n            (video_reader.width, video_reader.height))\n\n    for frame in track_iter_progress(video_reader):\n        result = inference_detector(model, frame, test_pipeline=test_pipeline)\n        visualizer.add_datasample(\n            name='video',\n            image=frame,\n            data_sample=result,\n            draw_gt=False,\n            show=False,\n            pred_score_thr=args.score_thr)\n        frame = visualizer.get_image()\n\n        if args.show:\n            cv2.namedWindow('video', 0)\n            mmcv.imshow(frame, 'video', args.wait_time)\n        if args.out:\n            video_writer.write(frame)\n\n    if video_writer:\n        video_writer.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "demo/video_gpuaccel_demo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom typing import Tuple\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.transforms import Compose\nfrom mmengine.utils import track_iter_progress\n\nfrom mmdet.apis import init_detector\nfrom mmdet.registry import VISUALIZERS\nfrom mmdet.structures import DetDataSample\n\ntry:\n    import ffmpegcv\nexcept ImportError:\n    raise ImportError(\n        'Please install ffmpegcv with:\\n\\n    pip install ffmpegcv')\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='MMDetection video demo with GPU acceleration')\n    parser.add_argument('video', help='Video file')\n    parser.add_argument('config', help='Config file')\n    parser.add_argument('checkpoint', help='Checkpoint file')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.3, help='Bbox score threshold')\n    parser.add_argument('--out', type=str, help='Output video file')\n    parser.add_argument('--show', action='store_true', help='Show video')\n    parser.add_argument(\n        '--nvdecode', action='store_true', help='Use NVIDIA decoder')\n    parser.add_argument(\n        '--wait-time',\n        type=float,\n        default=1,\n        help='The interval of show (s), 0 is block')\n    args = parser.parse_args()\n    return args\n\n\ndef prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,\n                                                               int]) -> dict:\n    cfg = model.cfg\n    w, h = ori_wh\n    cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'\n    test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)\n    data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}\n    data = test_pipeline(data)\n    _, data_sample = model.data_preprocessor([data], False)\n    batch_input_shape = data_sample[0].batch_input_shape\n    return batch_input_shape\n\n\ndef pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],\n              ori_shape: Tuple[int, int]) -> dict:\n    assert frame_resize.shape[:2] == batch_input_shape\n    data_sample = DetDataSample()\n    data_sample.set_metainfo({\n        'img_shape':\n        batch_input_shape,\n        'ori_shape':\n        ori_shape,\n        'scale_factor': (batch_input_shape[0] / ori_shape[0],\n                         batch_input_shape[1] / ori_shape[1])\n    })\n    frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))\n    data = {'inputs': frame_resize, 'data_sample': data_sample}\n    return data\n\n\ndef main():\n    args = parse_args()\n    assert args.out or args.show, \\\n        ('Please specify at least one operation (save/show the '\n         'video) with the argument \"--out\" or \"--show\"')\n\n    model = init_detector(args.config, args.checkpoint, device=args.device)\n\n    # init visualizer\n    visualizer = VISUALIZERS.build(model.cfg.visualizer)\n    # the dataset_meta is loaded from the checkpoint and\n    # then pass to the model in init_detector\n    visualizer.dataset_meta = model.dataset_meta\n\n    if args.nvdecode:\n        VideoCapture = ffmpegcv.VideoCaptureNV\n    else:\n        VideoCapture = ffmpegcv.VideoCapture\n    video_origin = VideoCapture(args.video)\n\n    batch_input_shape = prefetch_batch_input_shape(\n        model, (video_origin.width, video_origin.height))\n    ori_shape = 
(video_origin.height, video_origin.width)\n    resize_wh = batch_input_shape[::-1]\n    video_resize = VideoCapture(\n        args.video,\n        resize=resize_wh,\n        resize_keepratio=True,\n        resize_keepratioalign='topleft')\n\n    video_writer = None\n    if args.out:\n        video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)\n\n    with torch.no_grad():\n        for i, (frame_resize, frame_origin) in enumerate(\n                zip(track_iter_progress(video_resize), video_origin)):\n            data = pack_data(frame_resize, batch_input_shape, ori_shape)\n            result = model.test_step([data])[0]\n\n            visualizer.add_datasample(\n                name='video',\n                image=frame_origin,\n                data_sample=result,\n                draw_gt=False,\n                show=False,\n                pred_score_thr=args.score_thr)\n\n            frame_mask = visualizer.get_image()\n\n            if args.show:\n                cv2.namedWindow('video', 0)\n                mmcv.imshow(frame_mask, 'video', args.wait_time)\n            if args.out:\n                video_writer.write(frame_mask)\n\n    if video_writer:\n        video_writer.release()\n    video_origin.release()\n    video_resize.release()\n\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "demo/webcam_demo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\n\nimport cv2\nimport mmcv\nimport torch\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.registry import VISUALIZERS\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='MMDetection webcam demo')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint', help='checkpoint file')\n    parser.add_argument(\n        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')\n    parser.add_argument(\n        '--camera-id', type=int, default=0, help='camera device id')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.5, help='bbox score threshold')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # build the model from a config file and a checkpoint file\n    device = torch.device(args.device)\n    model = init_detector(args.config, args.checkpoint, device=device)\n\n    # init visualizer\n    visualizer = VISUALIZERS.build(model.cfg.visualizer)\n    # the dataset_meta is loaded from the checkpoint and\n    # then pass to the model in init_detector\n    visualizer.dataset_meta = model.dataset_meta\n\n    camera = cv2.VideoCapture(args.camera_id)\n\n    print('Press \"Esc\", \"q\" or \"Q\" to exit.')\n    while True:\n        ret_val, img = camera.read()\n        result = inference_detector(model, img)\n\n        img = mmcv.imconvert(img, 'bgr', 'rgb')\n        visualizer.add_datasample(\n            name='result',\n            image=img,\n            data_sample=result,\n            draw_gt=False,\n            pred_score_thr=args.score_thr,\n            show=False)\n\n        img = visualizer.get_image()\n        img = mmcv.imconvert(img, 'bgr', 'rgb')\n        cv2.imshow('result', img)\n\n        ch = cv2.waitKey(1)\n        if ch == 27 or ch == ord('q') or ch == ord('Q'):\n            break\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "docker/Dockerfile",
    "content": "ARG PYTORCH=\"1.9.0\"\nARG CUDA=\"11.1\"\nARG CUDNN=\"8\"\n\nFROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel\n\nENV TORCH_CUDA_ARCH_LIST=\"6.0 6.1 7.0 7.5 8.0 8.6+PTX\" \\\n    TORCH_NVCC_FLAGS=\"-Xfatbin -compress-all\" \\\n    CMAKE_PREFIX_PATH=\"$(dirname $(which conda))/../\" \\\n    FORCE_CUDA=\"1\"\n\n# Avoid Public GPG key error\n# https://github.com/NVIDIA/nvidia-docker/issues/1631\nRUN rm /etc/apt/sources.list.d/cuda.list \\\n    && rm /etc/apt/sources.list.d/nvidia-ml.list \\\n    && apt-key del 7fa2af80 \\\n    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \\\n    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub\n\n# (Optional, use Mirror to speed up downloads)\n# RUN sed -i 's/http:\\/\\/archive.ubuntu.com\\/ubuntu\\//http:\\/\\/mirrors.aliyun.com\\/ubuntu\\//g' /etc/apt/sources.list && \\\n#    pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple\n\n# Install the required packages\nRUN apt-get update \\\n    && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \\\n    && apt-get clean \\\n    && rm -rf /var/lib/apt/lists/*\n\n# Install MMEngine and MMCV\nRUN pip install openmim && \\\n    mim install \"mmengine>=0.6.0\" \"mmcv>=2.0.0rc4\"\n\n# Install MMDetection\nRUN conda clean --all \\\n    && git clone https://github.com/open-mmlab/mmdetection.git -b 3.x /mmdetection \\\n    && cd /mmdetection \\\n    && pip install --no-cache-dir -e .\n\nWORKDIR /mmdetection\n"
  },
  {
    "path": "docker/serve/Dockerfile",
    "content": "ARG PYTORCH=\"1.9.0\"\nARG CUDA=\"11.1\"\nARG CUDNN=\"8\"\nFROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel\n\nARG MMCV=\"2.0.0rc4\"\nARG MMDET=\"3.0.0rc6\"\n\nENV PYTHONUNBUFFERED TRUE\n\n# Avoid Public GPG key error\n# https://github.com/NVIDIA/nvidia-docker/issues/1631\nRUN rm /etc/apt/sources.list.d/cuda.list \\\n    && rm /etc/apt/sources.list.d/nvidia-ml.list \\\n    && apt-key del 7fa2af80 \\\n    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \\\n    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub\n\n# (Optional, use Mirror to speed up downloads)\n# RUN sed -i 's/http:\\/\\/archive.ubuntu.com\\/ubuntu\\//http:\\/\\/mirrors.aliyun.com\\/ubuntu\\//g' /etc/apt/sources.list\n\n# Install the required packages\nRUN apt-get update && \\\n    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \\\n    ca-certificates \\\n    g++ \\\n    openjdk-11-jre-headless \\\n    # MMDet Requirements\n    ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \\\n    && rm -rf /var/lib/apt/lists/*\n\nENV PATH=\"/opt/conda/bin:$PATH\" \\\n    FORCE_CUDA=\"1\"\n\n# TORCHSEVER\nRUN pip install torchserve torch-model-archiver\n\n# MMLAB\nARG PYTORCH\nARG CUDA\nRUN pip install mmengine\nRUN [\"/bin/bash\", \"-c\", \"pip install mmcv==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html\"]\nRUN pip install mmdet==${MMDET}\n\nRUN useradd -m model-server \\\n    && mkdir -p /home/model-server/tmp\n\nCOPY entrypoint.sh /usr/local/bin/entrypoint.sh\n\nRUN chmod +x /usr/local/bin/entrypoint.sh \\\n    && chown -R model-server /home/model-server\n\nCOPY config.properties /home/model-server/config.properties\nRUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store\n\nEXPOSE 8080 8081 8082\n\nUSER model-server\nWORKDIR /home/model-server\nENV TEMP=/home/model-server/tmp\nENTRYPOINT [\"/usr/local/bin/entrypoint.sh\"]\nCMD [\"serve\"]\n"
  },
  {
    "path": "docker/serve/config.properties",
    "content": "inference_address=http://0.0.0.0:8080\nmanagement_address=http://0.0.0.0:8081\nmetrics_address=http://0.0.0.0:8082\nmodel_store=/home/model-server/model-store\nload_models=all\n"
  },
  {
    "path": "docker/serve/entrypoint.sh",
    "content": "#!/bin/bash\nset -e\n\nif [[ \"$1\" = \"serve\" ]]; then\n    shift 1\n    torchserve --start --ts-config /home/model-server/config.properties\nelse\n    eval \"$@\"\nfi\n\n# prevent docker exit\ntail -f /dev/null\n"
  },
  {
    "path": "docker/serve_cn/Dockerfile",
    "content": "ARG PYTORCH=\"1.9.0\"\nARG CUDA=\"11.1\"\nARG CUDNN=\"8\"\nFROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel\n\nARG MMCV=\"2.0.0rc4\"\nARG MMDET=\"3.0.0rc6\"\n\nENV PYTHONUNBUFFERED TRUE\n\n# Avoid Public GPG key error\n# - https://github.com/NVIDIA/nvidia-docker/issues/1631\nRUN rm /etc/apt/sources.list.d/cuda.list \\\n  && rm /etc/apt/sources.list.d/nvidia-ml.list \\\n  && apt-get update \\\n  && apt-get install -y wget \\\n  && rm -rf /var/lib/apt/lists/* \\\n  && apt-key del 7fa2af80 \\\n  && apt-get update && apt-get install -y --no-install-recommends wget \\\n  && wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb \\\n  && dpkg -i cuda-keyring_1.0-1_all.deb\n# (Optional, use Mirror to speed up downloads)\n# RUN sed -i 's/http:\\/\\/archive.ubuntu.com\\/ubuntu\\//http:\\/\\/mirrors.aliyun.com\\/ubuntu\\//g' /etc/apt/sources.list\n\n# Install the required packages\nRUN apt-get update && \\\n    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \\\n    ca-certificates \\\n    g++ \\\n    openjdk-11-jre-headless \\\n    # MMDet Requirements\n    ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \\\n    && rm -rf /var/lib/apt/lists/*\n\nENV PATH=\"/opt/conda/bin:$PATH\" \\\n    FORCE_CUDA=\"1\"\n\n# TORCHSEVER\nRUN pip install torchserve torch-model-archiver nvgpu -i https://pypi.mirrors.ustc.edu.cn/simple/\n\n# MMLAB\nARG PYTORCH\nARG CUDA\nRUN pip install mmengine -i https://pypi.mirrors.ustc.edu.cn/simple/\nRUN [\"/bin/bash\", \"-c\", \"pip install mmcv==${MMCV} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${PYTORCH}/index.html\"]\nRUN pip install mmdet==${MMDET} -i https://pypi.mirrors.ustc.edu.cn/simple/\n\nRUN useradd -m model-server \\\n    && mkdir -p /home/model-server/tmp\n\nCOPY entrypoint.sh /usr/local/bin/entrypoint.sh\n\nRUN chmod +x /usr/local/bin/entrypoint.sh \\\n    && chown -R model-server /home/model-server\n\nCOPY config.properties /home/model-server/config.properties\nRUN mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store\n\nEXPOSE 8080 8081 8082\n\nUSER model-server\nWORKDIR /home/model-server\nENV TEMP=/home/model-server/tmp\nENTRYPOINT [\"/usr/local/bin/entrypoint.sh\"]\nCMD [\"serve\"]\n"
  },
  {
    "path": "mmdet/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport mmengine\nfrom mmengine.utils import digit_version\n\nfrom .version import __version__, version_info\n\nmmcv_minimum_version = '2.0.0rc4'\nmmcv_maximum_version = '2.1.0'\nmmcv_version = digit_version(mmcv.__version__)\n\nmmengine_minimum_version = '0.6.0'\nmmengine_maximum_version = '1.0.0'\nmmengine_version = digit_version(mmengine.__version__)\n\nassert (mmcv_version >= digit_version(mmcv_minimum_version)\n        and mmcv_version < digit_version(mmcv_maximum_version)), \\\n    f'MMCV=={mmcv.__version__} is used but incompatible. ' \\\n    f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'\n\nassert (mmengine_version >= digit_version(mmengine_minimum_version)\n        and mmengine_version < digit_version(mmengine_maximum_version)), \\\n    f'MMEngine=={mmengine.__version__} is used but incompatible. ' \\\n    f'Please install mmengine>={mmengine_minimum_version}, ' \\\n    f'<{mmengine_maximum_version}.'\n\n__all__ = ['__version__', 'version_info', 'digit_version']\n"
  },
  {
    "path": "mmdet/apis/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .det_inferencer import DetInferencer\nfrom .inference import (async_inference_detector, inference_detector,\n                        init_detector)\n\n__all__ = [\n    'init_detector', 'async_inference_detector', 'inference_detector',\n    'DetInferencer'\n]\n"
  },
  {
    "path": "mmdet/apis/det_inferencer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nimport warnings\nfrom typing import Dict, Iterable, List, Optional, Sequence, Union\n\nimport mmcv\nimport mmengine\nimport numpy as np\nimport torch.nn as nn\nfrom mmengine.dataset import Compose\nfrom mmengine.fileio import (get_file_backend, isdir, join_path,\n                             list_dir_or_file)\nfrom mmengine.infer.infer import BaseInferencer, ModelType\nfrom mmengine.model.utils import revert_sync_batchnorm\nfrom mmengine.registry import init_default_scope\nfrom mmengine.runner.checkpoint import _load_checkpoint_to_model\nfrom mmengine.visualization import Visualizer\nfrom rich.progress import track\n\nfrom mmdet.evaluation import INSTANCE_OFFSET\nfrom mmdet.registry import DATASETS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.mask import encode_mask_results, mask2bbox\nfrom mmdet.utils import ConfigType\nfrom ..evaluation import get_classes\n\ntry:\n    from panopticapi.evaluation import VOID\n    from panopticapi.utils import id2rgb\nexcept ImportError:\n    id2rgb = None\n    VOID = None\n\nInputType = Union[str, np.ndarray]\nInputsType = Union[InputType, Sequence[InputType]]\nPredType = List[DetDataSample]\nImgType = Union[np.ndarray, Sequence[np.ndarray]]\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',\n                  '.tiff', '.webp')\n\n\nclass DetInferencer(BaseInferencer):\n    \"\"\"Object Detection Inferencer.\n\n    Args:\n        model (str, optional): Path to the config file or the model name\n            defined in metafile. For example, it could be\n            \"rtmdet-s\" or 'rtmdet_s_8xb32-300e_coco' or\n            \"configs/rtmdet/rtmdet_s_8xb32-300e_coco.py\".\n            If model is not specified, user must provide the\n            `weights` saved by MMEngine which contains the config string.\n            Defaults to None.\n        weights (str, optional): Path to the checkpoint. If it is not specified\n            and model is a model name of metafile, the weights will be loaded\n            from metafile. Defaults to None.\n        device (str, optional): Device to run inference. If None, the available\n            device will be automatically used. Defaults to None.\n        scope (str, optional): The scope of the model. Defaults to mmdet.\n        palette (str): Color palette used for visualization. The order of\n            priority is palette -> config -> checkpoint. 
Defaults to 'none'.\n    \"\"\"\n\n    preprocess_kwargs: set = set()\n    forward_kwargs: set = set()\n    visualize_kwargs: set = {\n        'return_vis',\n        'show',\n        'wait_time',\n        'draw_pred',\n        'pred_score_thr',\n        'img_out_dir',\n        'no_save_vis',\n    }\n    postprocess_kwargs: set = {\n        'print_result',\n        'pred_out_dir',\n        'return_datasample',\n        'no_save_pred',\n    }\n\n    def __init__(self,\n                 model: Optional[Union[ModelType, str]] = None,\n                 weights: Optional[str] = None,\n                 device: Optional[str] = None,\n                 scope: Optional[str] = 'mmdet',\n                 palette: str = 'none') -> None:\n        # A global counter tracking the number of images processed, for\n        # naming of the output images\n        self.num_visualized_imgs = 0\n        self.num_predicted_imgs = 0\n        self.palette = palette\n        init_default_scope(scope)\n        super().__init__(\n            model=model, weights=weights, device=device, scope=scope)\n        self.model = revert_sync_batchnorm(self.model)\n\n    def _load_weights_to_model(self, model: nn.Module,\n                               checkpoint: Optional[dict],\n                               cfg: Optional[ConfigType]) -> None:\n        \"\"\"Loading model weights and meta information from cfg and checkpoint.\n\n        Args:\n            model (nn.Module): Model to load weights and meta information.\n            checkpoint (dict, optional): The loaded checkpoint.\n            cfg (Config or ConfigDict, optional): The loaded config.\n        \"\"\"\n\n        if checkpoint is not None:\n            _load_checkpoint_to_model(model, checkpoint)\n            checkpoint_meta = checkpoint.get('meta', {})\n            # save the dataset_meta in the model for convenience\n            if 'dataset_meta' in checkpoint_meta:\n                # mmdet 3.x, all keys should be lowercase\n                model.dataset_meta = {\n                    k.lower(): v\n                    for k, v in checkpoint_meta['dataset_meta'].items()\n                }\n            elif 'CLASSES' in checkpoint_meta:\n                # < mmdet 3.x\n                classes = checkpoint_meta['CLASSES']\n                model.dataset_meta = {'classes': classes}\n            else:\n                warnings.warn(\n                    'dataset_meta or class names are not saved in the '\n                    'checkpoint\\'s meta data, use COCO classes by default.')\n                model.dataset_meta = {'classes': get_classes('coco')}\n        else:\n            warnings.warn('Checkpoint is not loaded, and the inference '\n                          'result is calculated by the randomly initialized '\n                          'model!')\n            warnings.warn('weights is None, use COCO classes by default.')\n            model.dataset_meta = {'classes': get_classes('coco')}\n\n        # Priority:  args.palette -> config -> checkpoint\n        if self.palette != 'none':\n            model.dataset_meta['palette'] = self.palette\n        else:\n            test_dataset_cfg = copy.deepcopy(cfg.test_dataloader.dataset)\n            # lazy init. 
We only need the metainfo.\n            test_dataset_cfg['lazy_init'] = True\n            metainfo = DATASETS.build(test_dataset_cfg).metainfo\n            cfg_palette = metainfo.get('palette', None)\n            if cfg_palette is not None:\n                model.dataset_meta['palette'] = cfg_palette\n            else:\n                if 'palette' not in model.dataset_meta:\n                    warnings.warn(\n                        'palette does not exist, random is used by default. '\n                        'You can also set the palette to customize.')\n                    model.dataset_meta['palette'] = 'random'\n\n    def _init_pipeline(self, cfg: ConfigType) -> Compose:\n        \"\"\"Initialize the test pipeline.\"\"\"\n        pipeline_cfg = cfg.test_dataloader.dataset.pipeline\n\n        # For inference, the key of ``img_id`` is not used.\n        if 'meta_keys' in pipeline_cfg[-1]:\n            pipeline_cfg[-1]['meta_keys'] = tuple(\n                meta_key for meta_key in pipeline_cfg[-1]['meta_keys']\n                if meta_key != 'img_id')\n\n        load_img_idx = self._get_transform_idx(pipeline_cfg,\n                                               'LoadImageFromFile')\n        if load_img_idx == -1:\n            raise ValueError(\n                'LoadImageFromFile is not found in the test pipeline')\n        pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'\n        return Compose(pipeline_cfg)\n\n    def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int:\n        \"\"\"Returns the index of the transform in a pipeline.\n\n        If the transform is not found, returns -1.\n        \"\"\"\n        for i, transform in enumerate(pipeline_cfg):\n            if transform['type'] == name:\n                return i\n        return -1\n\n    def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:\n        \"\"\"Initialize visualizers.\n\n        Args:\n            cfg (ConfigType): Config containing the visualizer information.\n\n        Returns:\n            Visualizer or None: Visualizer initialized with config.\n        \"\"\"\n        visualizer = super()._init_visualizer(cfg)\n        visualizer.dataset_meta = self.model.dataset_meta\n        return visualizer\n\n    def _inputs_to_list(self, inputs: InputsType) -> list:\n        \"\"\"Preprocess the inputs to a list.\n\n        Preprocess inputs to a list according to its type:\n\n        - list or tuple: return inputs\n        - str:\n            - Directory path: return all files in the directory\n            - other cases: return a list containing the string. 
The string\n              could be a path to file, a url or other types of string according\n              to the task.\n\n        Args:\n            inputs (InputsType): Inputs for the inferencer.\n\n        Returns:\n            list: List of input for the :meth:`preprocess`.\n        \"\"\"\n        if isinstance(inputs, str):\n            backend = get_file_backend(inputs)\n            if hasattr(backend, 'isdir') and isdir(inputs):\n                # Backends like HttpsBackend do not implement `isdir`, so only\n                # those backends that implement `isdir` could accept the inputs\n                # as a directory\n                filename_list = list_dir_or_file(\n                    inputs, list_dir=False, suffix=IMG_EXTENSIONS)\n                inputs = [\n                    join_path(inputs, filename) for filename in filename_list\n                ]\n\n        if not isinstance(inputs, (list, tuple)):\n            inputs = [inputs]\n\n        return list(inputs)\n\n    def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):\n        \"\"\"Process the inputs into a model-feedable format.\n\n        Customize your preprocess by overriding this method. Preprocess should\n        return an iterable object, of which each item will be used as the\n        input of ``model.test_step``.\n\n        ``BaseInferencer.preprocess`` will return an iterable chunked data,\n        which will be used in __call__ like this:\n\n        .. code-block:: python\n\n            def __call__(self, inputs, batch_size=1, **kwargs):\n                chunked_data = self.preprocess(inputs, batch_size, **kwargs)\n                for batch in chunked_data:\n                    preds = self.forward(batch, **kwargs)\n\n        Args:\n            inputs (InputsType): Inputs given by user.\n            batch_size (int): batch size. 
Defaults to 1.\n\n        Yields:\n            Any: Data processed by the ``pipeline`` and ``collate_fn``.\n        \"\"\"\n        chunked_data = self._get_chunk_data(inputs, batch_size)\n        yield from map(self.collate_fn, chunked_data)\n\n    def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n        \"\"\"Get batch data from inputs.\n\n        Args:\n            inputs (Iterable): An iterable dataset.\n            chunk_size (int): Equivalent to batch size.\n\n        Yields:\n            list: batch data.\n        \"\"\"\n        inputs_iter = iter(inputs)\n        while True:\n            try:\n                chunk_data = []\n                for _ in range(chunk_size):\n                    inputs_ = next(inputs_iter)\n                    chunk_data.append((inputs_, self.pipeline(inputs_)))\n                yield chunk_data\n            except StopIteration:\n                if chunk_data:\n                    yield chunk_data\n                break\n\n    # TODO: Video and Webcam are currently not supported and\n    #  may consume too much memory if your input folder has a lot of images.\n    #  This will be optimized later.\n    def __call__(self,\n                 inputs: InputsType,\n                 batch_size: int = 1,\n                 return_vis: bool = False,\n                 show: bool = False,\n                 wait_time: int = 0,\n                 no_save_vis: bool = False,\n                 draw_pred: bool = True,\n                 pred_score_thr: float = 0.3,\n                 return_datasample: bool = False,\n                 print_result: bool = False,\n                 no_save_pred: bool = True,\n                 out_dir: str = '',\n                 **kwargs) -> dict:\n        \"\"\"Call the inferencer.\n\n        Args:\n            inputs (InputsType): Inputs for the inferencer.\n            batch_size (int): Inference batch size. Defaults to 1.\n            return_vis (bool): Whether to return the visualization result.\n                Defaults to False.\n            show (bool): Whether to display the visualization results in a\n                popup window. Defaults to False.\n            wait_time (float): The interval of show (s). Defaults to 0.\n            no_save_vis (bool): Whether to force not to save prediction\n                vis results. Defaults to False.\n            draw_pred (bool): Whether to draw predicted bounding boxes.\n                Defaults to True.\n            pred_score_thr (float): Minimum score of bboxes to draw.\n                Defaults to 0.3.\n            return_datasample (bool): Whether to return results as\n                :obj:`DetDataSample`. Defaults to False.\n            print_result (bool): Whether to print the inference result w/o\n                visualization to the console. Defaults to False.\n            no_save_pred (bool): Whether to force not to save prediction\n                results. Defaults to True.\n            out_dir (str): Dir to save the inference results or\n                visualization. 
If left as empty, no file will be saved.\n                Defaults to ''.\n\n            **kwargs: Other keyword arguments passed to :meth:`preprocess`,\n                :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.\n                Each key in kwargs should be in the corresponding set of\n                ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``\n                and ``postprocess_kwargs``.\n\n        Returns:\n            dict: Inference and visualization results.\n        \"\"\"\n        (\n            preprocess_kwargs,\n            forward_kwargs,\n            visualize_kwargs,\n            postprocess_kwargs,\n        ) = self._dispatch_kwargs(**kwargs)\n\n        ori_inputs = self._inputs_to_list(inputs)\n        inputs = self.preprocess(\n            ori_inputs, batch_size=batch_size, **preprocess_kwargs)\n\n        results_dict = {'predictions': [], 'visualization': []}\n        for ori_inputs, data in track(inputs, description='Inference'):\n            preds = self.forward(data, **forward_kwargs)\n            visualization = self.visualize(\n                ori_inputs,\n                preds,\n                return_vis=return_vis,\n                show=show,\n                wait_time=wait_time,\n                draw_pred=draw_pred,\n                pred_score_thr=pred_score_thr,\n                no_save_vis=no_save_vis,\n                img_out_dir=out_dir,\n                **visualize_kwargs)\n            results = self.postprocess(\n                preds,\n                visualization,\n                return_datasample=return_datasample,\n                print_result=print_result,\n                no_save_pred=no_save_pred,\n                pred_out_dir=out_dir,\n                **postprocess_kwargs)\n            results_dict['predictions'].extend(results['predictions'])\n            if results['visualization'] is not None:\n                results_dict['visualization'].extend(results['visualization'])\n        return results_dict\n\n    def visualize(self,\n                  inputs: InputsType,\n                  preds: PredType,\n                  return_vis: bool = False,\n                  show: bool = False,\n                  wait_time: int = 0,\n                  draw_pred: bool = True,\n                  pred_score_thr: float = 0.3,\n                  no_save_vis: bool = False,\n                  img_out_dir: str = '',\n                  **kwargs) -> Union[List[np.ndarray], None]:\n        \"\"\"Visualize predictions.\n\n        Args:\n            inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.\n            preds (List[:obj:`DetDataSample`]): Predictions of the model.\n            return_vis (bool): Whether to return the visualization result.\n                Defaults to False.\n            show (bool): Whether to display the image in a popup window.\n                Defaults to False.\n            wait_time (float): The interval of show (s). Defaults to 0.\n            draw_pred (bool): Whether to draw predicted bounding boxes.\n                Defaults to True.\n            pred_score_thr (float): Minimum score of bboxes to draw.\n                Defaults to 0.3.\n            no_save_vis (bool): Whether to force not to save prediction\n                vis results. Defaults to False.\n            img_out_dir (str): Output directory of visualization results.\n                If left as empty, no file will be saved. 
Defaults to ''.\n\n        Returns:\n            List[np.ndarray] or None: Returns visualization results only if\n            applicable.\n        \"\"\"\n        if no_save_vis is True:\n            img_out_dir = ''\n\n        if not show and img_out_dir == '' and not return_vis:\n            return None\n\n        if self.visualizer is None:\n            raise ValueError('Visualization needs the \"visualizer\" term'\n                             'defined in the config, but got None.')\n\n        results = []\n\n        for single_input, pred in zip(inputs, preds):\n            if isinstance(single_input, str):\n                img_bytes = mmengine.fileio.get(single_input)\n                img = mmcv.imfrombytes(img_bytes)\n                img = img[:, :, ::-1]\n                img_name = osp.basename(single_input)\n            elif isinstance(single_input, np.ndarray):\n                img = single_input.copy()\n                img_num = str(self.num_visualized_imgs).zfill(8)\n                img_name = f'{img_num}.jpg'\n            else:\n                raise ValueError('Unsupported input type: '\n                                 f'{type(single_input)}')\n\n            out_file = osp.join(img_out_dir, 'vis',\n                                img_name) if img_out_dir != '' else None\n\n            self.visualizer.add_datasample(\n                img_name,\n                img,\n                pred,\n                show=show,\n                wait_time=wait_time,\n                draw_gt=False,\n                draw_pred=draw_pred,\n                pred_score_thr=pred_score_thr,\n                out_file=out_file,\n            )\n            results.append(self.visualizer.get_image())\n            self.num_visualized_imgs += 1\n\n        return results\n\n    def postprocess(\n        self,\n        preds: PredType,\n        visualization: Optional[List[np.ndarray]] = None,\n        return_datasample: bool = False,\n        print_result: bool = False,\n        no_save_pred: bool = False,\n        pred_out_dir: str = '',\n        **kwargs,\n    ) -> Dict:\n        \"\"\"Process the predictions and visualization results from ``forward``\n        and ``visualize``.\n\n        This method should be responsible for the following tasks:\n\n        1. Convert datasamples into a json-serializable dict if needed.\n        2. Pack the predictions and visualization results and return them.\n        3. Dump or log the predictions.\n\n        Args:\n            preds (List[:obj:`DetDataSample`]): Predictions of the model.\n            visualization (Optional[np.ndarray]): Visualized predictions.\n            return_datasample (bool): Whether to use Datasample to store\n                inference results. If False, dict will be used.\n            print_result (bool): Whether to print the inference result w/o\n                visualization to the console. Defaults to False.\n            no_save_pred (bool): Whether to force not to save prediction\n                results. Defaults to False.\n            pred_out_dir: Dir to save the inference results w/o\n                visualization. 
If left as empty, no file will be saved.\n                Defaults to ''.\n\n        Returns:\n            dict: Inference and visualization results with key ``predictions``\n            and ``visualization``.\n\n            - ``visualization`` (Any): Returned by :meth:`visualize`.\n            - ``predictions`` (dict or DataSample): Returned by\n                :meth:`forward` and processed in :meth:`postprocess`.\n                If ``return_datasample=False``, it usually should be a\n                json-serializable dict containing only basic data elements such\n                as strings and numbers.\n        \"\"\"\n        if no_save_pred is True:\n            pred_out_dir = ''\n\n        result_dict = {}\n        results = preds\n        if not return_datasample:\n            results = []\n            for pred in preds:\n                result = self.pred2dict(pred, pred_out_dir)\n                results.append(result)\n        elif pred_out_dir != '':\n            warnings.warn('Currently does not support saving datasample '\n                          'when return_datasample is set to True. '\n                          'Prediction results are not saved!')\n        # Add img to the results after printing and dumping\n        result_dict['predictions'] = results\n        if print_result:\n            print(result_dict)\n        result_dict['visualization'] = visualization\n        return result_dict\n\n    # TODO: The data format and fields saved in json need further discussion.\n    #  Maybe should include model name, timestamp, filename, image info etc.\n    def pred2dict(self,\n                  data_sample: DetDataSample,\n                  pred_out_dir: str = '') -> Dict:\n        \"\"\"Extract elements necessary to represent a prediction into a\n        dictionary.\n\n        It's better to contain only basic data elements such as strings and\n        numbers in order to guarantee it's json-serializable.\n\n        Args:\n            data_sample (:obj:`DetDataSample`): Predictions of the model.\n            pred_out_dir: Dir to save the inference results w/o\n                visualization. 
If left as empty, no file will be saved.\n                Defaults to ''.\n\n        Returns:\n            dict: Prediction results.\n        \"\"\"\n        is_save_pred = True\n        if pred_out_dir == '':\n            is_save_pred = False\n\n        if is_save_pred and 'img_path' in data_sample:\n            img_path = osp.basename(data_sample.img_path)\n            img_path = osp.splitext(img_path)[0]\n            out_img_path = osp.join(pred_out_dir, 'preds',\n                                    img_path + '_panoptic_seg.png')\n            out_json_path = osp.join(pred_out_dir, 'preds', img_path + '.json')\n        elif is_save_pred:\n            out_img_path = osp.join(\n                pred_out_dir, 'preds',\n                f'{self.num_predicted_imgs}_panoptic_seg.png')\n            out_json_path = osp.join(pred_out_dir, 'preds',\n                                     f'{self.num_predicted_imgs}.json')\n            self.num_predicted_imgs += 1\n\n        result = {}\n        if 'pred_instances' in data_sample:\n            masks = data_sample.pred_instances.get('masks')\n            pred_instances = data_sample.pred_instances.numpy()\n            result = {\n                'bboxes': pred_instances.bboxes.tolist(),\n                'labels': pred_instances.labels.tolist(),\n                'scores': pred_instances.scores.tolist()\n            }\n            if masks is not None:\n                if pred_instances.bboxes.sum() == 0:\n                    # Fake bbox, such as the SOLO.\n                    bboxes = mask2bbox(masks.cpu()).numpy().tolist()\n                    result['bboxes'] = bboxes\n                encode_masks = encode_mask_results(pred_instances.masks)\n                for encode_mask in encode_masks:\n                    if isinstance(encode_mask['counts'], bytes):\n                        encode_mask['counts'] = encode_mask['counts'].decode()\n                result['masks'] = encode_masks\n\n        if 'pred_panoptic_seg' in data_sample:\n            if VOID is None:\n                raise RuntimeError(\n                    'panopticapi is not installed, please install it by: '\n                    'pip install git+https://github.com/cocodataset/'\n                    'panopticapi.git.')\n\n            pan = data_sample.pred_panoptic_seg.sem_seg.cpu().numpy()[0]\n            pan[pan % INSTANCE_OFFSET == len(\n                self.model.dataset_meta['classes'])] = VOID\n            pan = id2rgb(pan).astype(np.uint8)\n\n            if is_save_pred:\n                mmcv.imwrite(pan[:, :, ::-1], out_img_path)\n                result['panoptic_seg_path'] = out_img_path\n            else:\n                result['panoptic_seg'] = pan\n\n        if is_save_pred:\n            mmengine.dump(result, out_json_path)\n\n        return result\n"
  },
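  {
    "path": "demo/hypothetical_det_inferencer_usage.py",
    "content": "# Hypothetical usage sketch (not an upstream file): illustrates how the\n# DetInferencer defined in mmdet/apis/det_inferencer.py can be called, based\n# only on its documented arguments. The model name, image path and output\n# directory below are placeholders, and the import assumes DetInferencer is\n# re-exported from mmdet.apis.\nfrom mmdet.apis import DetInferencer\n\n# When `model` is a metafile name and `weights` is None, the checkpoint is\n# resolved from the metafile (see the DetInferencer docstring).\ninferencer = DetInferencer(model='rtmdet_s_8xb32-300e_coco', device='cpu')\n\n# __call__ returns a dict with 'predictions' and 'visualization' keys.\n# Visualizations go to <out_dir>/vis and JSON predictions to <out_dir>/preds.\nresults = inferencer(\n    'demo/demo.jpg',      # placeholder image path; a directory of images also works\n    out_dir='outputs/',   # placeholder output directory\n    no_save_pred=False,   # also dump per-image JSON predictions\n    pred_score_thr=0.3)\nprint(results['predictions'][0]['labels'][:5])\n"
  },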
  {
    "path": "mmdet/apis/inference.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom pathlib import Path\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import RoIPool\nfrom mmcv.transforms import Compose\nfrom mmengine.config import Config\nfrom mmengine.model.utils import revert_sync_batchnorm\nfrom mmengine.registry import init_default_scope\nfrom mmengine.runner import load_checkpoint\n\nfrom mmdet.registry import DATASETS\nfrom ..evaluation import get_classes\nfrom ..registry import MODELS\nfrom ..structures import DetDataSample, SampleList\nfrom ..utils import get_test_pipeline_cfg\n\n\ndef init_detector(\n    config: Union[str, Path, Config],\n    checkpoint: Optional[str] = None,\n    palette: str = 'none',\n    device: str = 'cuda:0',\n    cfg_options: Optional[dict] = None,\n) -> nn.Module:\n    \"\"\"Initialize a detector from config file.\n\n    Args:\n        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,\n            :obj:`Path`, or the config object.\n        checkpoint (str, optional): Checkpoint path. If left as None, the model\n            will not load any weights.\n        palette (str): Color palette used for visualization. If palette\n            is stored in checkpoint, use checkpoint's palette first, otherwise\n            use externally passed palette. Currently, supports 'coco', 'voc',\n            'citys' and 'random'. Defaults to none.\n        device (str): The device where the anchors will be put on.\n            Defaults to cuda:0.\n        cfg_options (dict, optional): Options to override some settings in\n            the used config.\n\n    Returns:\n        nn.Module: The constructed detector.\n    \"\"\"\n    if isinstance(config, (str, Path)):\n        config = Config.fromfile(config)\n    elif not isinstance(config, Config):\n        raise TypeError('config must be a filename or Config object, '\n                        f'but got {type(config)}')\n    if cfg_options is not None:\n        config.merge_from_dict(cfg_options)\n    elif 'init_cfg' in config.model.backbone:\n        config.model.backbone.init_cfg = None\n    init_default_scope(config.get('default_scope', 'mmdet'))\n\n    model = MODELS.build(config.model)\n    model = revert_sync_batchnorm(model)\n    if checkpoint is None:\n        warnings.simplefilter('once')\n        warnings.warn('checkpoint is None, use COCO classes by default.')\n        model.dataset_meta = {'classes': get_classes('coco')}\n    else:\n        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n        # Weights converted from elsewhere may not have meta fields.\n        checkpoint_meta = checkpoint.get('meta', {})\n\n        # save the dataset_meta in the model for convenience\n        if 'dataset_meta' in checkpoint_meta:\n            # mmdet 3.x, all keys should be lowercase\n            model.dataset_meta = {\n                k.lower(): v\n                for k, v in checkpoint_meta['dataset_meta'].items()\n            }\n        elif 'CLASSES' in checkpoint_meta:\n            # < mmdet 3.x\n            classes = checkpoint_meta['CLASSES']\n            model.dataset_meta = {'classes': classes}\n        else:\n            warnings.simplefilter('once')\n            warnings.warn(\n                'dataset_meta or class names are not saved in the '\n                'checkpoint\\'s meta data, use COCO classes by default.')\n            model.dataset_meta = {'classes': 
get_classes('coco')}\n\n    # Priority:  args.palette -> config -> checkpoint\n    if palette != 'none':\n        model.dataset_meta['palette'] = palette\n    else:\n        test_dataset_cfg = copy.deepcopy(config.test_dataloader.dataset)\n        # lazy init. We only need the metainfo.\n        test_dataset_cfg['lazy_init'] = True\n        metainfo = DATASETS.build(test_dataset_cfg).metainfo\n        cfg_palette = metainfo.get('palette', None)\n        if cfg_palette is not None:\n            model.dataset_meta['palette'] = cfg_palette\n        else:\n            if 'palette' not in model.dataset_meta:\n                warnings.warn(\n                    'palette does not exist, random is used by default. '\n                    'You can also set the palette to customize.')\n                model.dataset_meta['palette'] = 'random'\n\n    model.cfg = config  # save the config in the model for convenience\n    model.to(device)\n    model.eval()\n    return model\n\n\nImagesType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]\n\n\ndef inference_detector(\n    model: nn.Module,\n    imgs: ImagesType,\n    test_pipeline: Optional[Compose] = None\n) -> Union[DetDataSample, SampleList]:\n    \"\"\"Inference image(s) with the detector.\n\n    Args:\n        model (nn.Module): The loaded detector.\n        imgs (str, ndarray, Sequence[str/ndarray]):\n           Either image files or loaded images.\n        test_pipeline (:obj:`Compose`): Test pipeline.\n\n    Returns:\n        :obj:`DetDataSample` or list[:obj:`DetDataSample`]:\n        If imgs is a list or tuple, the same length list type results\n        will be returned, otherwise return the detection results directly.\n    \"\"\"\n\n    if isinstance(imgs, (list, tuple)):\n        is_batch = True\n    else:\n        imgs = [imgs]\n        is_batch = False\n\n    cfg = model.cfg\n\n    if test_pipeline is None:\n        cfg = cfg.copy()\n        test_pipeline = get_test_pipeline_cfg(cfg)\n        if isinstance(imgs[0], np.ndarray):\n            # Calling this method across libraries will result\n            # in module unregistered error if not prefixed with mmdet.\n            test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'\n\n        test_pipeline = Compose(test_pipeline)\n\n    if model.data_preprocessor.device.type == 'cpu':\n        for m in model.modules():\n            assert not isinstance(\n                m, RoIPool\n            ), 'CPU inference with RoIPool is not supported currently.'\n\n    result_list = []\n    for img in imgs:\n        # prepare data\n        if isinstance(img, np.ndarray):\n            # TODO: remove img_id.\n            data_ = dict(img=img, img_id=0)\n        else:\n            # TODO: remove img_id.\n            data_ = dict(img_path=img, img_id=0)\n        # build the data pipeline\n        data_ = test_pipeline(data_)\n\n        data_['inputs'] = [data_['inputs']]\n        data_['data_samples'] = [data_['data_samples']]\n\n        # forward the model\n        with torch.no_grad():\n            results = model.test_step(data_)[0]\n\n        result_list.append(results)\n\n    if not is_batch:\n        return result_list[0]\n    else:\n        return result_list\n\n\n# TODO: Awaiting refactoring\nasync def async_inference_detector(model, imgs):\n    \"\"\"Async inference image(s) with the detector.\n\n    Args:\n        model (nn.Module): The loaded detector.\n        img (str | ndarray): Either image files or loaded images.\n\n    Returns:\n        Awaitable detection results.\n    \"\"\"\n 
   if not isinstance(imgs, (list, tuple)):\n        imgs = [imgs]\n\n    cfg = model.cfg\n\n    if isinstance(imgs[0], np.ndarray):\n        cfg = cfg.copy()\n        # set loading pipeline type\n        cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'\n\n    # cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n    test_pipeline = Compose(cfg.data.test.pipeline)\n\n    datas = []\n    for img in imgs:\n        # prepare data\n        if isinstance(img, np.ndarray):\n            # directly add img\n            data = dict(img=img)\n        else:\n            # add information into dict\n            data = dict(img_info=dict(filename=img), img_prefix=None)\n        # build the data pipeline\n        data = test_pipeline(data)\n        datas.append(data)\n\n    for m in model.modules():\n        assert not isinstance(\n            m,\n            RoIPool), 'CPU inference with RoIPool is not supported currently.'\n\n    # We don't restore `torch.is_grad_enabled()` value during concurrent\n    # inference since execution can overlap\n    torch.set_grad_enabled(False)\n    results = await model.aforward_test(data, rescale=True)\n    return results\n"
  },
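  {
    "path": "demo/hypothetical_image_demo.py",
    "content": "# Hypothetical usage sketch (not an upstream file): shows the high-level\n# init_detector / inference_detector flow from mmdet/apis/inference.py.\n# The config, checkpoint and image paths are placeholders; the import assumes\n# both functions are re-exported from mmdet.apis.\nfrom mmdet.apis import inference_detector, init_detector\n\nconfig_file = 'configs/rtmdet/rtmdet_s_8xb32-300e_coco.py'  # placeholder config\ncheckpoint_file = 'checkpoints/rtmdet_s_coco.pth'           # placeholder checkpoint\n\n# `palette` supports 'coco', 'voc', 'citys' and 'random'; passing None as the\n# checkpoint keeps random weights and falls back to COCO class names.\nmodel = init_detector(config_file, checkpoint_file, palette='coco', device='cpu')\n\n# A single path returns one DetDataSample; a list or tuple of inputs returns a\n# list of DetDataSample of the same length.\nresult = inference_detector(model, 'demo/demo.jpg')  # placeholder image path\nprint(result.pred_instances.scores[:5])\n"
  },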
  {
    "path": "mmdet/datasets/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_det_dataset import BaseDetDataset\nfrom .cityscapes import CityscapesDataset\nfrom .coco import CocoDataset\nfrom .coco_panoptic import CocoPanopticDataset\nfrom .crowdhuman import CrowdHumanDataset\nfrom .dataset_wrappers import MultiImageMixDataset\nfrom .deepfashion import DeepFashionDataset\nfrom .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset\nfrom .objects365 import Objects365V1Dataset, Objects365V2Dataset\nfrom .openimages import OpenImagesChallengeDataset, OpenImagesDataset\nfrom .samplers import (AspectRatioBatchSampler, ClassAwareSampler,\n                       GroupMultiSourceSampler, MultiSourceSampler)\nfrom .utils import get_loading_pipeline\nfrom .voc import VOCDataset\nfrom .wider_face import WIDERFaceDataset\nfrom .xml_style import XMLDataset\n\n__all__ = [\n    'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',\n    'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',\n    'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',\n    'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',\n    'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',\n    'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',\n    'Objects365V1Dataset', 'Objects365V2Dataset'\n]\n"
  },
  {
    "path": "mmdet/datasets/api_wrappers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coco_api import COCO, COCOeval, COCOPanoptic\n\n__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']\n"
  },
  {
    "path": "mmdet/datasets/api_wrappers/coco_api.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# This file add snake case alias for coco api\n\nimport warnings\nfrom collections import defaultdict\nfrom typing import List, Optional, Union\n\nimport pycocotools\nfrom pycocotools.coco import COCO as _COCO\nfrom pycocotools.cocoeval import COCOeval as _COCOeval\n\n\nclass COCO(_COCO):\n    \"\"\"This class is almost the same as official pycocotools package.\n\n    It implements some snake case function aliases. So that the COCO class has\n    the same interface as LVIS class.\n    \"\"\"\n\n    def __init__(self, annotation_file=None):\n        if getattr(pycocotools, '__version__', '0') >= '12.0.2':\n            warnings.warn(\n                'mmpycocotools is deprecated. Please install official pycocotools by \"pip install pycocotools\"',  # noqa: E501\n                UserWarning)\n        super().__init__(annotation_file=annotation_file)\n        self.img_ann_map = self.imgToAnns\n        self.cat_img_map = self.catToImgs\n\n    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):\n        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)\n\n    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):\n        return self.getCatIds(cat_names, sup_names, cat_ids)\n\n    def get_img_ids(self, img_ids=[], cat_ids=[]):\n        return self.getImgIds(img_ids, cat_ids)\n\n    def load_anns(self, ids):\n        return self.loadAnns(ids)\n\n    def load_cats(self, ids):\n        return self.loadCats(ids)\n\n    def load_imgs(self, ids):\n        return self.loadImgs(ids)\n\n\n# just for the ease of import\nCOCOeval = _COCOeval\n\n\nclass COCOPanoptic(COCO):\n    \"\"\"This wrapper is for loading the panoptic style annotation file.\n\n    The format is shown in the CocoPanopticDataset class.\n\n    Args:\n        annotation_file (str, optional): Path of annotation file.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self, annotation_file: Optional[str] = None) -> None:\n        super(COCOPanoptic, self).__init__(annotation_file)\n\n    def createIndex(self) -> None:\n        \"\"\"Create index.\"\"\"\n        # create index\n        print('creating index...')\n        # anns stores 'segment_id -> annotation'\n        anns, cats, imgs = {}, {}, {}\n        img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)\n        if 'annotations' in self.dataset:\n            for ann in self.dataset['annotations']:\n                for seg_ann in ann['segments_info']:\n                    # to match with instance.json\n                    seg_ann['image_id'] = ann['image_id']\n                    img_to_anns[ann['image_id']].append(seg_ann)\n                    # segment_id is not unique in coco dataset orz...\n                    # annotations from different images but\n                    # may have same segment_id\n                    if seg_ann['id'] in anns.keys():\n                        anns[seg_ann['id']].append(seg_ann)\n                    else:\n                        anns[seg_ann['id']] = [seg_ann]\n\n            # filter out annotations from other images\n            img_to_anns_ = defaultdict(list)\n            for k, v in img_to_anns.items():\n                img_to_anns_[k] = [x for x in v if x['image_id'] == k]\n            img_to_anns = img_to_anns_\n\n        if 'images' in self.dataset:\n            for img_info in self.dataset['images']:\n                img_info['segm_file'] = img_info['file_name'].replace(\n                    'jpg', 'png')\n            
    imgs[img_info['id']] = img_info\n\n        if 'categories' in self.dataset:\n            for cat in self.dataset['categories']:\n                cats[cat['id']] = cat\n\n        if 'annotations' in self.dataset and 'categories' in self.dataset:\n            for ann in self.dataset['annotations']:\n                for seg_ann in ann['segments_info']:\n                    cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])\n\n        print('index created!')\n\n        self.anns = anns\n        self.imgToAnns = img_to_anns\n        self.catToImgs = cat_to_imgs\n        self.imgs = imgs\n        self.cats = cats\n\n    def load_anns(self,\n                  ids: Union[List[int], int] = []) -> Optional[List[dict]]:\n        \"\"\"Load anns with the specified ids.\n\n        ``self.anns`` is a list of annotation lists instead of a\n        list of annotations.\n\n        Args:\n            ids (Union[List[int], int]): Integer ids specifying anns.\n\n        Returns:\n            anns (List[dict], optional): Loaded ann objects.\n        \"\"\"\n        anns = []\n\n        if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):\n            # self.anns is a list of annotation lists instead of\n            # a list of annotations\n            for id in ids:\n                anns += self.anns[id]\n            return anns\n        elif type(ids) == int:\n            return self.anns[ids]\n"
  },
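  {
    "path": "demo/hypothetical_coco_api_usage.py",
    "content": "# Hypothetical usage sketch (not an upstream file): exercises the snake_case\n# aliases that the COCO wrapper in mmdet/datasets/api_wrappers/coco_api.py adds\n# on top of pycocotools, giving it the same interface as the LVIS API.\n# The annotation path is a placeholder.\nfrom mmdet.datasets.api_wrappers import COCO\n\ncoco = COCO('data/coco/annotations/instances_val2017.json')  # placeholder path\n\nimg_ids = coco.get_img_ids()\ncat_ids = coco.get_cat_ids(cat_names=['person'])\nann_ids = coco.get_ann_ids(img_ids=img_ids[:1], cat_ids=cat_ids)\nanns = coco.load_anns(ann_ids)\nprint(len(img_ids), len(anns))\n"
  },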
  {
    "path": "mmdet/datasets/base_det_dataset.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom typing import List, Optional\n\nfrom mmengine.dataset import BaseDataset\nfrom mmengine.fileio import FileClient, load\nfrom mmengine.utils import is_abs\n\nfrom ..registry import DATASETS\n\n\n@DATASETS.register_module()\nclass BaseDetDataset(BaseDataset):\n    \"\"\"Base dataset for detection.\n\n    Args:\n        proposal_file (str, optional): Proposals file path. Defaults to None.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 seg_map_suffix: str = '.png',\n                 proposal_file: Optional[str] = None,\n                 file_client_args: dict = dict(backend='disk'),\n                 **kwargs) -> None:\n        self.seg_map_suffix = seg_map_suffix\n        self.proposal_file = proposal_file\n        self.file_client_args = file_client_args\n        self.file_client = FileClient(**file_client_args)\n        super().__init__(*args, **kwargs)\n\n    def full_init(self) -> None:\n        \"\"\"Load annotation file and set ``BaseDataset._fully_initialized`` to\n        True.\n\n        If ``lazy_init=False``, ``full_init`` will be called during the\n        instantiation and ``self._fully_initialized`` will be set to True. If\n        ``obj._fully_initialized=False``, the class method decorated by\n        ``force_full_init`` will call ``full_init`` automatically.\n\n        Several steps to initialize annotation:\n\n            - load_data_list: Load annotations from annotation file.\n            - load_proposals: Load proposals from proposal file, if\n              `self.proposal_file` is not None.\n            - filter data information: Filter annotations according to\n              filter_cfg.\n            - slice_data: Slice dataset according to ``self._indices``\n            - serialize_data: Serialize ``self.data_list`` if\n            ``self.serialize_data`` is True.\n        \"\"\"\n        if self._fully_initialized:\n            return\n        # load data information\n        self.data_list = self.load_data_list()\n        # get proposals from file\n        if self.proposal_file is not None:\n            self.load_proposals()\n        # filter illegal data, such as data that has no annotations.\n        self.data_list = self.filter_data()\n\n        # Get subset data according to indices.\n        if self._indices is not None:\n            self.data_list = self._get_unserialized_subset(self._indices)\n\n        # serialize data_list\n        if self.serialize_data:\n            self.data_bytes, self.data_address = self._serialize_data()\n\n        self._fully_initialized = True\n\n    def load_proposals(self) -> None:\n        \"\"\"Load proposals from proposals file.\n\n        The `proposals_list` should be a dict[img_path: proposals]\n        with the same length as `data_list`. 
And the `proposals` should be\n        a `dict` or :obj:`InstanceData` usually containing the following keys.\n\n            - bboxes (np.ndarray): Has a shape (num_instances, 4),\n              the last dimension 4 arranged as (x1, y1, x2, y2).\n            - scores (np.ndarray): Classification scores, has a shape\n              (num_instance, ).\n        \"\"\"\n        # TODO: Add Unit Test after fully supporting Dump-Proposal Metric\n        if not is_abs(self.proposal_file):\n            self.proposal_file = osp.join(self.data_root, self.proposal_file)\n        proposals_list = load(\n            self.proposal_file, file_client_args=self.file_client_args)\n        assert len(self.data_list) == len(proposals_list)\n        for data_info in self.data_list:\n            img_path = data_info['img_path']\n            # `file_name` is the key to obtain the proposals from the\n            # `proposals_list`.\n            file_name = osp.join(\n                osp.split(osp.split(img_path)[0])[-1],\n                osp.split(img_path)[-1])\n            proposals = proposals_list[file_name]\n            data_info['proposals'] = proposals\n\n    def get_cat_ids(self, idx: int) -> List[int]:\n        \"\"\"Get COCO category ids by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            List[int]: All categories in the image of the specified index.\n        \"\"\"\n        instances = self.get_data_info(idx)['instances']\n        return [instance['bbox_label'] for instance in instances]\n"
  },
  {
    "path": "mmdet/datasets/cityscapes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa\n# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa\n\nfrom typing import List\n\nfrom mmdet.registry import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass CityscapesDataset(CocoDataset):\n    \"\"\"Dataset for Cityscapes.\"\"\"\n\n    METAINFO = {\n        'classes': ('person', 'rider', 'car', 'truck', 'bus', 'train',\n                    'motorcycle', 'bicycle'),\n        'palette': [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n                    (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]\n    }\n\n    def filter_data(self) -> List[dict]:\n        \"\"\"Filter annotations according to filter_cfg.\n\n        Returns:\n            List[dict]: Filtered results.\n        \"\"\"\n        if self.test_mode:\n            return self.data_list\n\n        if self.filter_cfg is None:\n            return self.data_list\n\n        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)\n        min_size = self.filter_cfg.get('min_size', 0)\n\n        # obtain images that contain annotation\n        ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)\n        # obtain images that contain annotations of the required categories\n        ids_in_cat = set()\n        for i, class_id in enumerate(self.cat_ids):\n            ids_in_cat |= set(self.cat_img_map[class_id])\n        # merge the image id sets of the two conditions and use the merged set\n        # to filter out images if self.filter_empty_gt=True\n        ids_in_cat &= ids_with_ann\n\n        valid_data_infos = []\n        for i, data_info in enumerate(self.data_list):\n            img_id = data_info['img_id']\n            width = data_info['width']\n            height = data_info['height']\n            all_is_crowd = all([\n                instance['ignore_flag'] == 1\n                for instance in data_info['instances']\n            ])\n            if filter_empty_gt and (img_id not in ids_in_cat or all_is_crowd):\n                continue\n            if min(width, height) >= min_size:\n                valid_data_infos.append(data_info)\n\n        return valid_data_infos\n"
  },
  {
    "path": "mmdet/datasets/coco.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nfrom typing import List, Union\n\nfrom mmdet.registry import DATASETS\nfrom .api_wrappers import COCO\nfrom .base_det_dataset import BaseDetDataset\n\n\n@DATASETS.register_module()\nclass CocoDataset(BaseDetDataset):\n    \"\"\"Dataset for COCO.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n         'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n         'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n         'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n         'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n         'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n         'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n         'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n         'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n         'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n         'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n         'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n         'scissors', 'teddy bear', 'hair drier', 'toothbrush'),\n        # palette is a list of color tuples, which is used for visualization.\n        'palette':\n        [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),\n         (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),\n         (100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30),\n         (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),\n         (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),\n         (199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92),\n         (209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164),\n         (92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0),\n         (174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174),\n         (255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54),\n         (207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51),\n         (74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65),\n         (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0),\n         (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161),\n         (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120),\n         (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),\n         (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),\n         (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45),\n         (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),\n         (246, 0, 122), (191, 162, 208)]\n    }\n    COCOAPI = COCO\n    # ann_id is unique in coco dataset.\n    ANN_ID_UNIQUE = True\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            self.coco = self.COCOAPI(local_path)\n        # The order of returned `cat_ids` will not\n        # change with the order of the `classes`\n        self.cat_ids = self.coco.get_cat_ids(\n            cat_names=self.metainfo['classes'])\n   
     self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)\n\n        img_ids = self.coco.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.coco.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = img_id\n\n            ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.coco.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.coco\n\n        return data_list\n\n    def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n        \"\"\"Parse raw annotation to target format.\n\n        Args:\n            raw_data_info (dict): Raw data information load from ``ann_file``\n\n        Returns:\n            Union[dict, List[dict]]: Parsed annotation.\n        \"\"\"\n        img_info = raw_data_info['raw_img_info']\n        ann_info = raw_data_info['raw_ann_info']\n\n        data_info = {}\n\n        # TODO: need to change data_prefix['img'] to data_prefix['img_path']\n        img_path = osp.join(self.data_prefix['img'], img_info['file_name'])\n        if self.data_prefix.get('seg', None):\n            seg_map_path = osp.join(\n                self.data_prefix['seg'],\n                img_info['file_name'].rsplit('.', 1)[0] + self.seg_map_suffix)\n        else:\n            seg_map_path = None\n        data_info['img_path'] = img_path\n        data_info['img_id'] = img_info['img_id']\n        data_info['seg_map_path'] = seg_map_path\n        data_info['height'] = img_info['height']\n        data_info['width'] = img_info['width']\n\n        instances = []\n        for i, ann in enumerate(ann_info):\n            instance = {}\n\n            if ann.get('ignore', False):\n                continue\n            x1, y1, w, h = ann['bbox']\n            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n            if inter_w * inter_h == 0:\n                continue\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            if ann['category_id'] not in self.cat_ids:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n\n            if ann.get('iscrowd', False):\n                instance['ignore_flag'] = 1\n            else:\n                instance['ignore_flag'] = 0\n            instance['bbox'] = bbox\n            instance['bbox_label'] = self.cat2label[ann['category_id']]\n\n            if ann.get('segmentation', None):\n                instance['mask'] = ann['segmentation']\n\n            instances.append(instance)\n        data_info['instances'] = instances\n        return data_info\n\n    def filter_data(self) -> List[dict]:\n        \"\"\"Filter annotations according to filter_cfg.\n\n        Returns:\n            List[dict]: Filtered results.\n        \"\"\"\n        if self.test_mode:\n            return self.data_list\n\n        if self.filter_cfg is None:\n            return 
self.data_list\n\n        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)\n        min_size = self.filter_cfg.get('min_size', 0)\n\n        # obtain images that contain annotation\n        ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)\n        # obtain images that contain annotations of the required categories\n        ids_in_cat = set()\n        for i, class_id in enumerate(self.cat_ids):\n            ids_in_cat |= set(self.cat_img_map[class_id])\n        # merge the image id sets of the two conditions and use the merged set\n        # to filter out images if self.filter_empty_gt=True\n        ids_in_cat &= ids_with_ann\n\n        valid_data_infos = []\n        for i, data_info in enumerate(self.data_list):\n            img_id = data_info['img_id']\n            width = data_info['width']\n            height = data_info['height']\n            if filter_empty_gt and img_id not in ids_in_cat:\n                continue\n            if min(width, height) >= min_size:\n                valid_data_infos.append(data_info)\n\n        return valid_data_infos\n"
  },
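  {
    "path": "demo/hypothetical_dataset_metainfo.py",
    "content": "# Hypothetical usage sketch (not an upstream file): builds CocoDataset through\n# the DATASETS registry with lazy_init=True, mirroring how det_inferencer.py\n# and inference.py read only the dataset metainfo (classes/palette) without\n# parsing the annotation file. All paths below are placeholders.\nfrom mmengine.registry import init_default_scope\n\nfrom mmdet.registry import DATASETS\n\ninit_default_scope('mmdet')\n\ndataset_cfg = dict(\n    type='CocoDataset',\n    data_root='data/coco/',                         # placeholder\n    ann_file='annotations/instances_val2017.json',  # placeholder\n    data_prefix=dict(img='val2017/'),\n    test_mode=True,\n    lazy_init=True,  # skip loading annotations; only metainfo is needed\n    pipeline=[])\n\ndataset = DATASETS.build(dataset_cfg)\nprint(dataset.metainfo['classes'][:5])\nprint(dataset.metainfo['palette'][:5])\n"
  },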
  {
    "path": "mmdet/datasets/coco_panoptic.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom typing import Callable, List, Optional, Sequence, Union\n\nfrom mmdet.registry import DATASETS\nfrom .api_wrappers import COCOPanoptic\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass CocoPanopticDataset(CocoDataset):\n    \"\"\"Coco dataset for Panoptic segmentation.\n\n    The annotation format is shown as follows. The `ann` field is optional\n    for testing.\n\n    .. code-block:: none\n\n        [\n            {\n                'filename': f'{image_id:012}.png',\n                'image_id':9\n                'segments_info':\n                [\n                    {\n                        'id': 8345037, (segment_id in panoptic png,\n                                        convert from rgb)\n                        'category_id': 51,\n                        'iscrowd': 0,\n                        'bbox': (x1, y1, w, h),\n                        'area': 24315\n                    },\n                    ...\n                ]\n            },\n            ...\n        ]\n\n    Args:\n        ann_file (str): Annotation file path. Defaults to ''.\n        metainfo (dict, optional): Meta information for dataset, such as class\n            information. Defaults to None.\n        data_root (str, optional): The root directory for ``data_prefix`` and\n            ``ann_file``. Defaults to None.\n        data_prefix (dict, optional): Prefix for training data. Defaults to\n            ``dict(img=None, ann=None, seg=None)``. The prefix ``seg`` which is\n            for panoptic segmentation map must be not None.\n        filter_cfg (dict, optional): Config for filter data. Defaults to None.\n        indices (int or Sequence[int], optional): Support using first few\n            data in annotation file to facilitate training/testing on a smaller\n            dataset. Defaults to None which means using all ``data_infos``.\n        serialize_data (bool, optional): Whether to hold memory using\n            serialized objects, when enabled, data loader workers can use\n            shared RAM from master process instead of making a copy. Defaults\n            to True.\n        pipeline (list, optional): Processing pipeline. Defaults to [].\n        test_mode (bool, optional): ``test_mode=True`` means in test phase.\n            Defaults to False.\n        lazy_init (bool, optional): Whether to load annotation during\n            instantiation. In some cases, such as visualization, only the meta\n            information of the dataset is needed, which is not necessary to\n            load annotation file. ``Basedataset`` can skip load annotations to\n            save time by set ``lazy_init=False``. Defaults to False.\n        max_refetch (int, optional): If ``Basedataset.prepare_data`` get a\n            None img. The maximum extra number of cycles to get a valid\n            image. 
Defaults to 1000.\n    \"\"\"\n\n    METAINFO = {\n        'classes':\n        ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n         'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n         'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n         'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n         'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n         'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n         'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n         'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n         'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n         'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n         'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n         'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n         'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',\n         'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',\n         'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',\n         'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',\n         'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',\n         'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n         'wall-wood', 'water-other', 'window-blind', 'window-other',\n         'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',\n         'cabinet-merged', 'table-merged', 'floor-other-merged',\n         'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',\n         'paper-merged', 'food-other-merged', 'building-other-merged',\n         'rock-merged', 'wall-other-merged', 'rug-merged'),\n        'thing_classes':\n        ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n         'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n         'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n         'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n         'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n         'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n         'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n         'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n         'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n         'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n         'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n         'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n         'scissors', 'teddy bear', 'hair drier', 'toothbrush'),\n        'stuff_classes':\n        ('banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',\n         'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',\n         'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',\n         'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',\n         'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n         'wall-wood', 'water-other', 'window-blind', 'window-other',\n         'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',\n         'cabinet-merged', 'table-merged', 'floor-other-merged',\n         'pavement-merged', 
'mountain-merged', 'grass-merged', 'dirt-merged',\n         'paper-merged', 'food-other-merged', 'building-other-merged',\n         'rock-merged', 'wall-other-merged', 'rug-merged'),\n        'palette':\n        [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),\n         (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),\n         (100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30),\n         (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),\n         (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),\n         (199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92),\n         (209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164),\n         (92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0),\n         (174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174),\n         (255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54),\n         (207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51),\n         (74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65),\n         (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0),\n         (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161),\n         (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120),\n         (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),\n         (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),\n         (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45),\n         (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),\n         (246, 0, 122), (191, 162, 208), (255, 255, 128), (147, 211, 203),\n         (150, 100, 100), (168, 171, 172), (146, 112, 198), (210, 170, 100),\n         (92, 136, 89), (218, 88, 184), (241, 129, 0), (217, 17, 255),\n         (124, 74, 181), (70, 70, 70), (255, 228, 255), (154, 208, 0),\n         (193, 0, 92), (76, 91, 113), (255, 180, 195), (106, 154, 176),\n         (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55),\n         (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255),\n         (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74),\n         (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149),\n         (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153),\n         (146, 139, 141), (70, 130, 180), (134, 199, 156), (209, 226, 140),\n         (96, 36, 108), (96, 96, 96), (64, 170, 64), (152, 251, 152),\n         (208, 229, 228), (206, 186, 171), (152, 161, 64), (116, 112, 0),\n         (0, 114, 143), (102, 102, 156), (250, 141, 255)]\n    }\n    COCOAPI = COCOPanoptic\n    # ann_id is not unique in coco panoptic dataset.\n    ANN_ID_UNIQUE = False\n\n    def __init__(self,\n                 ann_file: str = '',\n                 metainfo: Optional[dict] = None,\n                 data_root: Optional[str] = None,\n                 data_prefix: dict = dict(img=None, ann=None, seg=None),\n                 filter_cfg: Optional[dict] = None,\n                 indices: Optional[Union[int, Sequence[int]]] = None,\n                 serialize_data: bool = True,\n                 pipeline: List[Union[dict, Callable]] = [],\n                 test_mode: bool = False,\n                 lazy_init: bool = False,\n                 max_refetch: int = 1000) -> None:\n        super().__init__(\n            ann_file=ann_file,\n            metainfo=metainfo,\n            data_root=data_root,\n            data_prefix=data_prefix,\n            filter_cfg=filter_cfg,\n            
indices=indices,\n            serialize_data=serialize_data,\n            pipeline=pipeline,\n            test_mode=test_mode,\n            lazy_init=lazy_init,\n            max_refetch=max_refetch)\n\n    def parse_data_info(self, raw_data_info: dict) -> dict:\n        \"\"\"Parse raw annotation to target format.\n\n        Args:\n            raw_data_info (dict): Raw data information loaded from ``ann_file``.\n\n        Returns:\n            dict: Parsed annotation.\n        \"\"\"\n        img_info = raw_data_info['raw_img_info']\n        ann_info = raw_data_info['raw_ann_info']\n        # filter out unmatched annotations which have\n        # the same segment_id but belong to other images\n        ann_info = [\n            ann for ann in ann_info if ann['image_id'] == img_info['img_id']\n        ]\n        data_info = {}\n\n        img_path = osp.join(self.data_prefix['img'], img_info['file_name'])\n        if self.data_prefix.get('seg', None):\n            seg_map_path = osp.join(\n                self.data_prefix['seg'],\n                img_info['file_name'].replace('jpg', 'png'))\n        else:\n            seg_map_path = None\n        data_info['img_path'] = img_path\n        data_info['img_id'] = img_info['img_id']\n        data_info['seg_map_path'] = seg_map_path\n        data_info['height'] = img_info['height']\n        data_info['width'] = img_info['width']\n\n        instances = []\n        segments_info = []\n        for ann in ann_info:\n            instance = {}\n            x1, y1, w, h = ann['bbox']\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n            category_id = ann['category_id']\n            contiguous_cat_id = self.cat2label[category_id]\n\n            is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']\n            if is_thing:\n                is_crowd = ann.get('iscrowd', False)\n                instance['bbox'] = bbox\n                instance['bbox_label'] = contiguous_cat_id\n                if not is_crowd:\n                    instance['ignore_flag'] = 0\n                else:\n                    instance['ignore_flag'] = 1\n                    is_thing = False\n\n            segment_info = {\n                'id': ann['id'],\n                'category': contiguous_cat_id,\n                'is_thing': is_thing\n            }\n            segments_info.append(segment_info)\n            if len(instance) > 0 and is_thing:\n                instances.append(instance)\n        data_info['instances'] = instances\n        data_info['segments_info'] = segments_info\n        return data_info\n\n    def filter_data(self) -> List[dict]:\n        \"\"\"Filter images too small or without ground truth.\n\n        Returns:\n            List[dict]: ``self.data_list`` after filtering.\n        \"\"\"\n        if self.test_mode:\n            return self.data_list\n\n        if self.filter_cfg is None:\n            return self.data_list\n\n        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)\n        min_size = self.filter_cfg.get('min_size', 0)\n\n        ids_with_ann = set()\n        # check whether images have legal thing annotations.\n        for data_info in self.data_list:\n            for segment_info in data_info['segments_info']:\n                if not segment_info['is_thing']:\n                    continue\n                ids_with_ann.add(data_info['img_id'])\n\n        valid_data_list = []\n        for data_info in self.data_list:\n            img_id = 
data_info['img_id']\n            width = data_info['width']\n            height = data_info['height']\n            if filter_empty_gt and img_id not in ids_with_ann:\n                continue\n            if min(width, height) >= min_size:\n                valid_data_list.append(data_info)\n\n        return valid_data_list\n"
  },
  {
    "path": "mmdet/datasets/crowdhuman.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport json\nimport logging\nimport os.path as osp\nimport warnings\nfrom typing import List, Union\n\nimport mmcv\nfrom mmengine.dist import get_rank\nfrom mmengine.fileio import dump, load\nfrom mmengine.logging import print_log\nfrom mmengine.utils import ProgressBar\n\nfrom mmdet.registry import DATASETS\nfrom .base_det_dataset import BaseDetDataset\n\n\n@DATASETS.register_module()\nclass CrowdHumanDataset(BaseDetDataset):\n    r\"\"\"Dataset for CrowdHuman.\n\n    Args:\n        data_root (str): The root directory for\n            ``data_prefix`` and ``ann_file``.\n        ann_file (str): Annotation file path.\n        extra_ann_file (str | optional):The path of extra image metas\n            for CrowdHuman. It can be created by CrowdHumanDataset\n            automatically or by tools/misc/get_crowdhuman_id_hw.py\n            manually. Defaults to None.\n    \"\"\"\n\n    METAINFO = {\n        'classes': ('person', ),\n        # palette is a list of color tuples, which is used for visualization.\n        'palette': [(220, 20, 60)]\n    }\n\n    def __init__(self, data_root, ann_file, extra_ann_file=None, **kwargs):\n        # extra_ann_file record the size of each image. This file is\n        # automatically created when you first load the CrowdHuman\n        # dataset by mmdet.\n        if extra_ann_file is not None:\n            self.extra_ann_exist = True\n            self.extra_anns = load(extra_ann_file)\n        else:\n            ann_file_name = osp.basename(ann_file)\n            if 'train' in ann_file_name:\n                self.extra_ann_file = osp.join(data_root, 'id_hw_train.json')\n            elif 'val' in ann_file_name:\n                self.extra_ann_file = osp.join(data_root, 'id_hw_val.json')\n            self.extra_ann_exist = False\n            if not osp.isfile(self.extra_ann_file):\n                print_log(\n                    'extra_ann_file does not exist, prepare to collect '\n                    'image height and width...',\n                    level=logging.INFO)\n                self.extra_anns = {}\n            else:\n                self.extra_ann_exist = True\n                self.extra_anns = load(self.extra_ann_file)\n        super().__init__(data_root=data_root, ann_file=ann_file, **kwargs)\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        anno_strs = self.file_client.get_text(\n            self.ann_file).strip().split('\\n')\n        print_log('loading CrowdHuman annotation...', level=logging.INFO)\n        data_list = []\n        prog_bar = ProgressBar(len(anno_strs))\n        for i, anno_str in enumerate(anno_strs):\n            anno_dict = json.loads(anno_str)\n            parsed_data_info = self.parse_data_info(anno_dict)\n            data_list.append(parsed_data_info)\n            prog_bar.update()\n        if not self.extra_ann_exist and get_rank() == 0:\n            #  TODO: support file client\n            try:\n                dump(self.extra_anns, self.extra_ann_file, file_format='json')\n            except:  # noqa\n                warnings.warn(\n                    'Cache files can not be saved automatically! 
To speed up '\n                    'loading the dataset, please manually generate the cache'\n                    ' file by running tools/misc/get_crowdhuman_id_hw.py')\n\n            print_log(\n                f'\\nsave extra_ann_file in {self.data_root}',\n                level=logging.INFO)\n\n        del self.extra_anns\n        print_log('\\nDone', level=logging.INFO)\n        return data_list\n\n    def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n        \"\"\"Parse raw annotation to target format.\n\n        Args:\n            raw_data_info (dict): Raw data information loaded from ``ann_file``\n\n        Returns:\n            Union[dict, List[dict]]: Parsed annotation.\n        \"\"\"\n        data_info = {}\n        img_path = osp.join(self.data_prefix['img'],\n                            f\"{raw_data_info['ID']}.jpg\")\n        data_info['img_path'] = img_path\n        data_info['img_id'] = raw_data_info['ID']\n\n        if not self.extra_ann_exist:\n            img_bytes = self.file_client.get(img_path)\n            img = mmcv.imfrombytes(img_bytes, backend='cv2')\n            data_info['height'], data_info['width'] = img.shape[:2]\n            self.extra_anns[raw_data_info['ID']] = img.shape[:2]\n            del img, img_bytes\n        else:\n            data_info['height'], data_info['width'] = self.extra_anns[\n                raw_data_info['ID']]\n\n        instances = []\n        for i, ann in enumerate(raw_data_info['gtboxes']):\n            instance = {}\n            if ann['tag'] not in self.metainfo['classes']:\n                instance['bbox_label'] = -1\n                instance['ignore_flag'] = 1\n            else:\n                instance['bbox_label'] = self.metainfo['classes'].index(\n                    ann['tag'])\n                instance['ignore_flag'] = 0\n            if 'extra' in ann:\n                if 'ignore' in ann['extra']:\n                    if ann['extra']['ignore'] != 0:\n                        instance['bbox_label'] = -1\n                        instance['ignore_flag'] = 1\n\n            x1, y1, w, h = ann['fbox']\n            bbox = [x1, y1, x1 + w, y1 + h]\n            instance['bbox'] = bbox\n\n            # Record the full bbox (fbox), head bbox (hbox) and visible\n            # bbox (vbox) as additional information. If you need to use\n            # this information, you just need to design the pipeline\n            # instead of overriding the CrowdHumanDataset.\n            instance['fbox'] = bbox\n            hbox = ann['hbox']\n            instance['hbox'] = [\n                hbox[0], hbox[1], hbox[0] + hbox[2], hbox[1] + hbox[3]\n            ]\n            vbox = ann['vbox']\n            instance['vbox'] = [\n                vbox[0], vbox[1], vbox[0] + vbox[2], vbox[1] + vbox[3]\n            ]\n\n            instances.append(instance)\n\n        data_info['instances'] = instances\n        return data_info\n"
  },
  {
    "path": "mmdet/datasets/dataset_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport collections\nimport copy\nfrom typing import Sequence, Union\n\nfrom mmengine.dataset import BaseDataset, force_full_init\n\nfrom mmdet.registry import DATASETS, TRANSFORMS\n\n\n@DATASETS.register_module()\nclass MultiImageMixDataset:\n    \"\"\"A wrapper of multiple images mixed dataset.\n\n    Suitable for training on multiple images mixed data augmentation like\n    mosaic and mixup. For the augmentation pipeline of mixed image data,\n    the `get_indexes` method needs to be provided to obtain the image\n    indexes, and you can set `skip_flags` to change the pipeline running\n    process. At the same time, we provide the `dynamic_scale` parameter\n    to dynamically change the output image size.\n\n    Args:\n        dataset (:obj:`CustomDataset`): The dataset to be mixed.\n        pipeline (Sequence[dict]): Sequence of transform object or\n            config dict to be composed.\n        dynamic_scale (tuple[int], optional): The image scale can be changed\n            dynamically. Default to None. It is deprecated.\n        skip_type_keys (list[str], optional): Sequence of type string to\n            be skip pipeline. Default to None.\n        max_refetch (int): The maximum number of retry iterations for getting\n            valid results from the pipeline. If the number of iterations is\n            greater than `max_refetch`, but results is still None, then the\n            iteration is terminated and raise the error. Default: 15.\n    \"\"\"\n\n    def __init__(self,\n                 dataset: Union[BaseDataset, dict],\n                 pipeline: Sequence[str],\n                 skip_type_keys: Union[Sequence[str], None] = None,\n                 max_refetch: int = 15,\n                 lazy_init: bool = False) -> None:\n        assert isinstance(pipeline, collections.abc.Sequence)\n        if skip_type_keys is not None:\n            assert all([\n                isinstance(skip_type_key, str)\n                for skip_type_key in skip_type_keys\n            ])\n        self._skip_type_keys = skip_type_keys\n\n        self.pipeline = []\n        self.pipeline_types = []\n        for transform in pipeline:\n            if isinstance(transform, dict):\n                self.pipeline_types.append(transform['type'])\n                transform = TRANSFORMS.build(transform)\n                self.pipeline.append(transform)\n            else:\n                raise TypeError('pipeline must be a dict')\n\n        self.dataset: BaseDataset\n        if isinstance(dataset, dict):\n            self.dataset = DATASETS.build(dataset)\n        elif isinstance(dataset, BaseDataset):\n            self.dataset = dataset\n        else:\n            raise TypeError(\n                'elements in datasets sequence should be config or '\n                f'`BaseDataset` instance, but got {type(dataset)}')\n\n        self._metainfo = self.dataset.metainfo\n        if hasattr(self.dataset, 'flag'):\n            self.flag = self.dataset.flag\n        self.num_samples = len(self.dataset)\n        self.max_refetch = max_refetch\n\n        self._fully_initialized = False\n        if not lazy_init:\n            self.full_init()\n\n    @property\n    def metainfo(self) -> dict:\n        \"\"\"Get the meta information of the multi-image-mixed dataset.\n\n        Returns:\n            dict: The meta information of multi-image-mixed dataset.\n        \"\"\"\n        return copy.deepcopy(self._metainfo)\n\n    def full_init(self):\n        
\"\"\"Loop to ``full_init`` each dataset.\"\"\"\n        if self._fully_initialized:\n            return\n\n        self.dataset.full_init()\n        self._ori_len = len(self.dataset)\n        self._fully_initialized = True\n\n    @force_full_init\n    def get_data_info(self, idx: int) -> dict:\n        \"\"\"Get annotation by index.\n\n        Args:\n            idx (int): Global index of ``ConcatDataset``.\n\n        Returns:\n            dict: The idx-th annotation of the datasets.\n        \"\"\"\n        return self.dataset.get_data_info(idx)\n\n    @force_full_init\n    def __len__(self):\n        return self.num_samples\n\n    def __getitem__(self, idx):\n        results = copy.deepcopy(self.dataset[idx])\n        for (transform, transform_type) in zip(self.pipeline,\n                                               self.pipeline_types):\n            if self._skip_type_keys is not None and \\\n                    transform_type in self._skip_type_keys:\n                continue\n\n            if hasattr(transform, 'get_indexes'):\n                for i in range(self.max_refetch):\n                    # Make sure the results passed the loading pipeline\n                    # of the original dataset is not None.\n                    indexes = transform.get_indexes(self.dataset)\n                    if not isinstance(indexes, collections.abc.Sequence):\n                        indexes = [indexes]\n                    mix_results = [\n                        copy.deepcopy(self.dataset[index]) for index in indexes\n                    ]\n                    if None not in mix_results:\n                        results['mix_results'] = mix_results\n                        break\n                else:\n                    raise RuntimeError(\n                        'The loading pipeline of the original dataset'\n                        ' always return None. Please check the correctness '\n                        'of the dataset and its pipeline.')\n\n            for i in range(self.max_refetch):\n                # To confirm the results passed the training pipeline\n                # of the wrapper is not None.\n                updated_results = transform(copy.deepcopy(results))\n                if updated_results is not None:\n                    results = updated_results\n                    break\n            else:\n                raise RuntimeError(\n                    'The training pipeline of the dataset wrapper'\n                    ' always return None.Please check the correctness '\n                    'of the dataset and its pipeline.')\n\n            if 'mix_results' in results:\n                results.pop('mix_results')\n\n        return results\n\n    def update_skip_type_keys(self, skip_type_keys):\n        \"\"\"Update skip_type_keys. It is called by an external hook.\n\n        Args:\n            skip_type_keys (list[str], optional): Sequence of type\n                string to be skip pipeline.\n        \"\"\"\n        assert all([\n            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys\n        ])\n        self._skip_type_keys = skip_type_keys\n"
  },
  {
    "path": "mmdet/datasets/deepfashion.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass DeepFashionDataset(CocoDataset):\n    \"\"\"Dataset for DeepFashion.\"\"\"\n\n    METAINFO = {\n        'classes': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',\n                    'bag', 'neckwear', 'headwear', 'eyeglass', 'belt',\n                    'footwear', 'hair', 'skin', 'face'),\n        # palette is a list of color tuples, which is used for visualization.\n        'palette': [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),\n                    (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),\n                    (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),\n                    (128, 0, 96), (128, 0, 192), (0, 32, 192)]\n    }\n"
  },
  {
    "path": "mmdet/datasets/lvis.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom typing import List\n\nfrom mmdet.registry import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass LVISV05Dataset(CocoDataset):\n    \"\"\"LVIS v0.5 dataset for detection.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',\n         'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',\n         'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',\n         'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',\n         'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',\n         'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',\n         'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',\n         'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',\n         'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',\n         'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',\n         'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',\n         'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',\n         'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball',\n         'bead', 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',\n         'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle',\n         'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle',\n         'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder',\n         'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage',\n         'birdhouse', 'birthday_cake', 'birthday_card', 'biscuit_(bread)',\n         'pirate_flag', 'black_sheep', 'blackboard', 'blanket', 'blazer',\n         'blender', 'blimp', 'blinker', 'blueberry', 'boar', 'gameboard',\n         'boat', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt',\n         'bolt', 'bonnet', 'book', 'book_bag', 'bookcase', 'booklet',\n         'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener',\n         'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie',\n         'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',\n         'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',\n         'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',\n         'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',\n         'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',\n         'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',\n         'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',\n         'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',\n         'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',\n         'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',\n         'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',\n         'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',\n         'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',\n         'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',\n         'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',\n         'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',\n         'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',\n         'cart', 
'carton', 'cash_register', 'casserole', 'cassette', 'cast',\n         'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',\n         'celery', 'cellular_telephone', 'chain_mail', 'chair',\n         'chaise_longue', 'champagne', 'chandelier', 'chap', 'checkbook',\n         'checkerboard', 'cherry', 'chessboard',\n         'chest_of_drawers_(furniture)', 'chicken_(animal)', 'chicken_wire',\n         'chickpea', 'Chihuahua', 'chili_(vegetable)', 'chime', 'chinaware',\n         'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',\n         'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',\n         'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',\n         'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',\n         'clasp', 'cleansing_agent', 'clementine', 'clip', 'clipboard',\n         'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag',\n         'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'coconut',\n         'coffee_filter', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',\n         'coin', 'colander', 'coleslaw', 'coloring_material',\n         'combination_lock', 'pacifier', 'comic_book', 'computer_keyboard',\n         'concrete_mixer', 'cone', 'control', 'convertible_(automobile)',\n         'sofa_bed', 'cookie', 'cookie_jar', 'cooking_utensil',\n         'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew',\n         'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',\n         'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',\n         'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',\n         'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',\n         'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',\n         'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',\n         'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',\n         'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',\n         'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',\n         'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n         'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',\n         'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n         'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',\n         'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',\n         'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',\n         'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',\n         'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',\n         'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',\n         'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',\n         'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',\n         'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',\n         'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',\n         'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',\n         'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',\n         'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)',\n         'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose',\n         'fireplace', 'fireplug', 'fish', 'fish_(food)', 'fishbowl',\n         'fishing_boat', 'fishing_rod', 'flag', 'flagpole', 'flamingo',\n         
'flannel', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',\n         'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',\n         'folding_chair', 'food_processor', 'football_(American)',\n         'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',\n         'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',\n         'fruit_salad', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag',\n         'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle',\n         'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',\n         'gift_wrap', 'ginger', 'giraffe', 'cincture',\n         'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',\n         'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',\n         'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',\n         'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',\n         'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',\n         'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',\n         'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',\n         'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',\n         'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',\n         'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',\n         'headband', 'headboard', 'headlight', 'headscarf', 'headset',\n         'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',\n         'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',\n         'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',\n         'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n         'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n         'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n         'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',\n         'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',\n         'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',\n         'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',\n         'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',\n         'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',\n         'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',\n         'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',\n         'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',\n         'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',\n         'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',\n         'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',\n         'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',\n         'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',\n         'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine',\n         'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',\n         'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',\n         'mascot', 'mashed_potato', 'masher', 'mask', 'mast',\n         'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',\n         'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',\n         'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',\n         'mint_candy', 'mirror', 'mitten', 
'mixer_(kitchen_tool)', 'money',\n         'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',\n         'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',\n         'mound_(baseball)', 'mouse_(animal_rodent)',\n         'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',\n         'music_stool', 'musical_instrument', 'nailfile', 'nameplate',\n         'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest',\n         'newsstand', 'nightshirt', 'nosebag_(for_animals)',\n         'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',\n         'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',\n         'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'oregano',\n         'ostrich', 'ottoman', 'overalls_(clothing)', 'owl', 'packet',\n         'inkpad', 'pad', 'paddle', 'padlock', 'paintbox', 'paintbrush',\n         'painting', 'pajamas', 'palette', 'pan_(for_cooking)',\n         'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya',\n         'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',\n         'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',\n         'parchment', 'parka', 'parking_meter', 'parrot',\n         'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n         'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n         'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',\n         'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',\n         'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',\n         'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',\n         'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n         'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n         'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n         'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n         'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n         'plate', 'platter', 'playing_card', 'playpen', 'pliers',\n         'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',\n         'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',\n         'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',\n         'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot',\n         'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',\n         'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',\n         'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',\n         'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',\n         'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',\n         'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',\n         'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n         'recliner', 'record_player', 'red_cabbage', 'reflector',\n         'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',\n         'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',\n         'Rollerblade', 'rolling_pin', 'root_beer',\n         'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',\n         'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',\n         'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',\n         'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',\n         'sandal_(type_of_shoe)', 'sandwich', 
'satchel', 'saucepan', 'saucer',\n         'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',\n         'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',\n         'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',\n         'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n         'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',\n         'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',\n         'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',\n         'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag',\n         'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag',\n         'shovel', 'shower_head', 'shower_curtain', 'shredder_(for_paper)',\n         'sieve', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski',\n         'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'sled', 'sleeping_bag',\n         'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 'snake',\n         'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock',\n         'soda_fountain', 'carbonated_water', 'sofa', 'softball',\n         'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',\n         'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',\n         'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'sponge',\n         'spoon', 'sportswear', 'spotlight', 'squirrel',\n         'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)',\n         'steak_(food)', 'steak_knife', 'steamer_(kitchen_appliance)',\n         'steering_wheel', 'stencil', 'stepladder', 'step_stool',\n         'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup',\n         'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', 'stove',\n         'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',\n         'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',\n         'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',\n         'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',\n         'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',\n         'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',\n         'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',\n         'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',\n         'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',\n         'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',\n         'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n         'telephone_pole', 'telephoto_lens', 'television_camera',\n         'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n         'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n         'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',\n         'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',\n         'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',\n         'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',\n         'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',\n         'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',\n         'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',\n         'tree_house', 'trench_coat', 'triangle_(musical_instrument)',\n         'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',\n         'trunk', 'vat', 'turban', 'turkey_(bird)', 
'turkey_(food)', 'turnip',\n         'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',\n         'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',\n         'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',\n         'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',\n         'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',\n         'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',\n         'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',\n         'water_heater', 'water_jug', 'water_gun', 'water_scooter',\n         'water_ski', 'water_tower', 'watering_can', 'watermelon',\n         'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit',\n         'wheel', 'wheelchair', 'whipped_cream', 'whiskey', 'whistle', 'wick',\n         'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',\n         'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',\n         'wineglass', 'wing_chair', 'blinder_(for_horses)', 'wok', 'wolf',\n         'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht',\n         'yak', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini'),\n        'palette':\n        None\n    }\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        try:\n            import lvis\n            if getattr(lvis, '__version__', '0') >= '10.5.3':\n                warnings.warn(\n                    'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n                    UserWarning)\n            from lvis import LVIS\n        except ImportError:\n            raise ImportError(\n                'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".'  
# noqa: E501\n            )\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            self.lvis = LVIS(local_path)\n        self.cat_ids = self.lvis.get_cat_ids()\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n        img_ids = self.lvis.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.lvis.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = img_id\n            if raw_img_info['file_name'].startswith('COCO'):\n                # Convert form the COCO 2014 file naming convention of\n                # COCO_[train/val/test]2014_000000000000.jpg to the 2017\n                # naming convention of 000000000000.jpg\n                # (LVIS v1 will fix this naming issue)\n                raw_img_info['file_name'] = raw_img_info['file_name'][-16:]\n            ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.lvis.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.lvis\n\n        return data_list\n\n\nLVISDataset = LVISV05Dataset\nDATASETS.register_module(name='LVISDataset', module=LVISDataset)\n\n\n@DATASETS.register_module()\nclass LVISV1Dataset(LVISDataset):\n    \"\"\"LVIS v1 dataset for detection.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',\n         'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',\n         'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',\n         'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',\n         'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',\n         'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',\n         'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',\n         'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',\n         'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',\n         'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',\n         'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',\n         'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',\n         'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',\n         'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',\n         'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',\n         'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',\n         'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',\n         'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',\n         'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',\n         'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',\n         'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',\n         'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',\n         
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',\n         'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',\n         'bottle_opener', 'bouquet', 'bow_(weapon)',\n         'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl',\n         'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders',\n         'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread',\n         'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach',\n         'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket',\n         'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train',\n         'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed',\n         'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter',\n         'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet',\n         'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder',\n         'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can',\n         'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane',\n         'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen',\n         'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',\n         'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',\n         'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',\n         'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',\n         'cash_register', 'casserole', 'cassette', 'cast', 'cat',\n         'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery',\n         'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',\n         'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard',\n         'cherry', 'chessboard', 'chicken_(animal)', 'chickpea',\n         'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',\n         'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',\n         'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',\n         'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',\n         'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',\n         'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard',\n         'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower',\n         'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',\n         'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)',\n         'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',\n         'coin', 'colander', 'coleslaw', 'coloring_material',\n         'combination_lock', 'pacifier', 'comic_book', 'compass',\n         'computer_keyboard', 'condiment', 'cone', 'control',\n         'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',\n         'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',\n         'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',\n         'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',\n         'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',\n         'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',\n         'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',\n         'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',\n         'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',\n         'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',\n         'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 
'dartboard',\n         'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n         'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',\n         'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n         'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',\n         'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',\n         'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove',\n         'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat',\n         'dress_suit', 'dresser', 'drill', 'drone', 'dropper',\n         'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',\n         'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle',\n         'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg',\n         'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair',\n         'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot',\n         'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret',\n         'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine',\n         'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine',\n         'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug',\n         'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod',\n         'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash',\n         'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',\n         'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',\n         'food_processor', 'football_(American)', 'football_helmet',\n         'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',\n         'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge',\n         'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose',\n         'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin',\n         'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger',\n         'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove',\n         'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart',\n         'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater',\n         'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',\n         'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun',\n         'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger',\n         'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass',\n         'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle',\n         'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil',\n         'headband', 'headboard', 'headlight', 'headscarf', 'headset',\n         'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',\n         'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',\n         'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',\n         'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n         'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n         'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n         'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',\n         'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',\n         'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',\n         'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',\n         
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',\n         'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',\n         'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',\n         'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',\n         'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',\n         'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade',\n         'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',\n         'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',\n         'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',\n         'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',\n         'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange',\n         'manger', 'manhole', 'map', 'marker', 'martini', 'mascot',\n         'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)',\n         'matchbox', 'mattress', 'measuring_cup', 'measuring_stick',\n         'meatball', 'medicine', 'melon', 'microphone', 'microscope',\n         'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake',\n         'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)',\n         'money', 'monitor_(computer_equipment) computer_monitor', 'monkey',\n         'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle',\n         'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad',\n         'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument',\n         'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle',\n         'nest', 'newspaper', 'newsstand', 'nightshirt',\n         'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook',\n         'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',\n         'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',\n         'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven',\n         'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',\n         'padlock', 'paintbrush', 'painting', 'pajamas', 'palette',\n         'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',\n         'papaya', 'paper_plate', 'paper_towel', 'paperback_book',\n         'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',\n         'parasol', 'parchment', 'parka', 'parking_meter', 'parrot',\n         'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n         'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n         'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',\n         'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',\n         'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',\n         'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',\n         'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n         'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n         'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n         'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n         'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n         'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',\n         'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',\n         'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',\n         'postbox_(public)', 'postcard', 'poster', 'pot', 
'flowerpot',\n         'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',\n         'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller',\n         'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin',\n         'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt',\n         'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver',\n         'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry',\n         'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n         'recliner', 'record_player', 'reflector', 'remote_control',\n         'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',\n         'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',\n         'rolling_pin', 'root_beer', 'router_(computer_equipment)',\n         'rubber_band', 'runner_(carpet)', 'plastic_bag',\n         'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',\n         'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',\n         'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',\n         'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',\n         'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',\n         'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',\n         'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n         'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',\n         'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',\n         'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',\n         'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',\n         'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',\n         'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',\n         'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',\n         'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',\n         'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',\n         'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',\n         'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',\n         'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',\n         'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',\n         'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',\n         'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',\n         'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',\n         'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew',\n         'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove',\n         'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',\n         'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',\n         'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',\n         'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants',\n         'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit',\n         'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',\n         'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',\n         'tambourine', 'army_tank', 'tank_(storage_vessel)',\n         'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',\n         'tapestry', 'tarp', 'tartan', 'tassel', 
'tea_bag', 'teacup',\n         'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n         'telephone_pole', 'telephoto_lens', 'television_camera',\n         'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n         'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n         'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',\n         'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',\n         'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',\n         'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',\n         'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',\n         'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',\n         'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',\n         'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod',\n         'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban',\n         'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',\n         'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',\n         'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',\n         'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',\n         'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',\n         'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',\n         'washbasin', 'automatic_washer', 'watch', 'water_bottle',\n         'water_cooler', 'water_faucet', 'water_heater', 'water_jug',\n         'water_gun', 'water_scooter', 'water_ski', 'water_tower',\n         'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',\n         'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',\n         'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',\n         'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',\n         'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',\n         'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',\n         'yoke_(animal_equipment)', 'zebra', 'zucchini'),\n        'palette':\n        None\n    }\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        try:\n            import lvis\n            if getattr(lvis, '__version__', '0') >= '10.5.3':\n                warnings.warn(\n                    'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n                    UserWarning)\n            from lvis import LVIS\n        except ImportError:\n            raise ImportError(\n                'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".'  # noqa: E501\n            )\n        self.lvis = LVIS(self.ann_file)\n        self.cat_ids = self.lvis.get_cat_ids()\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n        img_ids = self.lvis.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.lvis.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = img_id\n            # coco_url is used in LVISv1 instead of file_name\n            # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg\n            # train/val split is specified in the url\n            raw_img_info['file_name'] = raw_img_info['coco_url'].replace(\n                'http://images.cocodataset.org/', '')\n            ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.lvis.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.lvis\n\n        return data_list\n"
  },
  {
    "path": "mmdet/datasets/objects365.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nfrom typing import List\n\nfrom mmdet.registry import DATASETS\nfrom .api_wrappers import COCO\nfrom .coco import CocoDataset\n\n# images exist in annotations but not in image folder.\nobjv2_ignore_list = [\n    osp.join('patch16', 'objects365_v2_00908726.jpg'),\n    osp.join('patch6', 'objects365_v1_00320532.jpg'),\n    osp.join('patch6', 'objects365_v1_00320534.jpg'),\n]\n\n\n@DATASETS.register_module()\nclass Objects365V1Dataset(CocoDataset):\n    \"\"\"Objects365 v1 dataset for detection.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',\n         'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',\n         'handbag', 'street lights', 'book', 'plate', 'helmet',\n         'leather shoes', 'pillow', 'glove', 'potted plant', 'bracelet',\n         'flower', 'tv', 'storage box', 'vase', 'bench', 'wine glass', 'boots',\n         'bowl', 'dining table', 'umbrella', 'boat', 'flag', 'speaker',\n         'trash bin/can', 'stool', 'backpack', 'couch', 'belt', 'carpet',\n         'basket', 'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table',\n         'suv', 'toy', 'tie', 'bed', 'traffic light', 'pen/pencil',\n         'microphone', 'sandals', 'canned', 'necklace', 'mirror', 'faucet',\n         'bicycle', 'bread', 'high heels', 'ring', 'van', 'watch', 'sink',\n         'horse', 'fish', 'apple', 'camera', 'candle', 'teddy bear', 'cake',\n         'motorcycle', 'wild bird', 'laptop', 'knife', 'traffic sign',\n         'cell phone', 'paddle', 'truck', 'cow', 'power outlet', 'clock',\n         'drum', 'fork', 'bus', 'hanger', 'nightstand', 'pot/pan', 'sheep',\n         'guitar', 'traffic cone', 'tea pot', 'keyboard', 'tripod', 'hockey',\n         'fan', 'dog', 'spoon', 'blackboard/whiteboard', 'balloon',\n         'air conditioner', 'cymbal', 'mouse', 'telephone', 'pickup truck',\n         'orange', 'banana', 'airplane', 'luggage', 'skis', 'soccer',\n         'trolley', 'oven', 'remote', 'baseball glove', 'paper towel',\n         'refrigerator', 'train', 'tomato', 'machinery vehicle', 'tent',\n         'shampoo/shower gel', 'head phone', 'lantern', 'donut',\n         'cleaning products', 'sailboat', 'tangerine', 'pizza', 'kite',\n         'computer box', 'elephant', 'toiletries', 'gas stove', 'broccoli',\n         'toilet', 'stroller', 'shovel', 'baseball bat', 'microwave',\n         'skateboard', 'surfboard', 'surveillance camera', 'gun', 'life saver',\n         'cat', 'lemon', 'liquid soap', 'zebra', 'duck', 'sports car',\n         'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator', 'converter',\n         'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',\n         'cutting/chopping board', 'tennis racket', 'candy',\n         'skating and skiing shoes', 'scissors', 'folder', 'baseball',\n         'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',\n         'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',\n         'american football', 'basketball', 'potato', 'paint brush', 'printer',\n         'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',\n         'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',\n         'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',\n         'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',\n         'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',\n         
'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',\n         'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale',\n         'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',\n         'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',\n         'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',\n         'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',\n         'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',\n         'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',\n         'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',\n         'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',\n         'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',\n         'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',\n         'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste',\n         'antelope', 'shrimp', 'rickshaw', 'trombone', 'pomegranate',\n         'coconut', 'jellyfish', 'mushroom', 'calculator', 'treadmill',\n         'butterfly', 'egg tart', 'cheese', 'pig', 'pomelo', 'race car',\n         'rice cooker', 'tuba', 'crosswalk sign', 'papaya', 'hair drier',\n         'green onion', 'chips', 'dolphin', 'sushi', 'urinal', 'donkey',\n         'electric drill', 'spring rolls', 'tortoise/turtle', 'parrot',\n         'flute', 'measuring cup', 'shark', 'steak', 'poker card',\n         'binoculars', 'llama', 'radish', 'noodles', 'yak', 'mop', 'crab',\n         'microscope', 'barbell', 'bread/bun', 'baozi', 'lion', 'red cabbage',\n         'polar bear', 'lighter', 'seal', 'mangosteen', 'comb', 'eraser',\n         'pitaya', 'scallop', 'pencil case', 'saw', 'table tennis paddle',\n         'okra', 'starfish', 'eagle', 'monkey', 'durian', 'game board',\n         'rabbit', 'french horn', 'ambulance', 'asparagus', 'hoverboard',\n         'pasta', 'target', 'hotair balloon', 'chainsaw', 'lobster', 'iron',\n         'flashlight'),\n        'palette':\n        None\n    }\n\n    COCOAPI = COCO\n    # ann_id is unique in coco dataset.\n    ANN_ID_UNIQUE = True\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            self.coco = self.COCOAPI(local_path)\n\n        # 'categories' list in objects365_train.json and objects365_val.json\n        # is inconsistent, need sort list(or dict) before get cat_ids.\n        cats = self.coco.cats\n        sorted_cats = {i: cats[i] for i in sorted(cats)}\n        self.coco.cats = sorted_cats\n        categories = self.coco.dataset['categories']\n        sorted_categories = sorted(categories, key=lambda i: i['id'])\n        self.coco.dataset['categories'] = sorted_categories\n        # The order of returned `cat_ids` will not\n        # change with the order of the `classes`\n        self.cat_ids = self.coco.get_cat_ids(\n            cat_names=self.metainfo['classes'])\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)\n\n        img_ids = self.coco.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.coco.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = 
img_id\n\n            ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.coco.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.coco\n\n        return data_list\n\n\n@DATASETS.register_module()\nclass Objects365V2Dataset(CocoDataset):\n    \"\"\"Objects365 v2 dataset for detection.\"\"\"\n    METAINFO = {\n        'classes':\n        ('Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',\n         'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',\n         'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',\n         'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',\n         'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',\n         'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',\n         'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',\n         'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',\n         'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',\n         'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle',\n         'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned',\n         'Truck', 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel',\n         'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed',\n         'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple',\n         'Air Conditioner', 'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck',\n         'Fork', 'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock',\n         'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',\n         'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',\n         'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle',\n         'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane',\n         'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',\n         'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone',\n         'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane',\n         'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',\n         'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza',\n         'Elephant', 'Skateboard', 'Surfboard', 'Gun',\n         'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',\n         'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',\n         'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',\n         'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',\n         'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',\n         'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',\n         'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle',\n         'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',\n         'Billards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club',\n         'Briefcase', 'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear',\n         'Heavy 
Truck', 'Hamburger', 'Extractor', 'Extention Cord', 'Tong',\n         'Tennis Racket', 'Folder', 'American Football', 'earphone', 'Mask',\n         'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', 'Slide',\n         'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee',\n         'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',\n         'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',\n         'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',\n         'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',\n         'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',\n         'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',\n         'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',\n         'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',\n         'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',\n         'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',\n         'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',\n         'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',\n         'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',\n         'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',\n         'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',\n         'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',\n         'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker',\n         'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',\n         'Buttefly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin',\n         'Electric Drill', 'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill',\n         'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi',\n         'Target', 'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case',\n         'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',\n         'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',\n         'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',\n         'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',\n         'Table Tennis '),\n        'palette':\n        None\n    }\n\n    COCOAPI = COCO\n    # ann_id is unique in coco dataset.\n    ANN_ID_UNIQUE = True\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            self.coco = self.COCOAPI(local_path)\n        # The order of returned `cat_ids` will not\n        # change with the order of the `classes`\n        self.cat_ids = self.coco.get_cat_ids(\n            cat_names=self.metainfo['classes'])\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)\n\n        img_ids = self.coco.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.coco.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = img_id\n\n            ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.coco.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n\n            # file_name should be 
`patchX/xxx.jpg`\n            file_name = osp.join(\n                osp.split(osp.split(raw_img_info['file_name'])[0])[-1],\n                osp.split(raw_img_info['file_name'])[-1])\n\n            if file_name in objv2_ignore_list:\n                continue\n\n            raw_img_info['file_name'] = file_name\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.coco\n\n        return data_list\n"
  },
  {
    "path": "mmdet/datasets/openimages.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport csv\nimport os.path as osp\nfrom collections import defaultdict\nfrom typing import Dict, List, Optional\n\nimport numpy as np\nfrom mmengine.fileio import load\nfrom mmengine.utils import is_abs\n\nfrom mmdet.registry import DATASETS\nfrom .base_det_dataset import BaseDetDataset\n\n\n@DATASETS.register_module()\nclass OpenImagesDataset(BaseDetDataset):\n    \"\"\"Open Images dataset for detection.\n\n    Args:\n        ann_file (str): Annotation file path.\n        label_file (str): File path of the label description file that\n            maps the classes names in MID format to their short\n            descriptions.\n        meta_file (str): File path to get image metas.\n        hierarchy_file (str): The file path of the class hierarchy.\n        image_level_ann_file (str): Human-verified image level annotation,\n            which is used in evaluation.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    METAINFO: dict = dict(dataset_type='oid_v6')\n\n    def __init__(self,\n                 label_file: str,\n                 meta_file: str,\n                 hierarchy_file: str,\n                 image_level_ann_file: Optional[str] = None,\n                 **kwargs) -> None:\n        self.label_file = label_file\n        self.meta_file = meta_file\n        self.hierarchy_file = hierarchy_file\n        self.image_level_ann_file = image_level_ann_file\n        super().__init__(**kwargs)\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"\n        classes_names, label_id_mapping = self._parse_label_file(\n            self.label_file)\n        self._metainfo['classes'] = classes_names\n        self.label_id_mapping = label_id_mapping\n\n        if self.image_level_ann_file is not None:\n            img_level_anns = self._parse_img_level_ann(\n                self.image_level_ann_file)\n        else:\n            img_level_anns = None\n\n        # OpenImagesMetric can get the relation matrix from the dataset meta\n        relation_matrix = self._get_relation_matrix(self.hierarchy_file)\n        self._metainfo['RELATION_MATRIX'] = relation_matrix\n\n        data_list = []\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            with open(local_path, 'r') as f:\n                reader = csv.reader(f)\n                last_img_id = None\n                instances = []\n                for i, line in enumerate(reader):\n                    if i == 0:\n                        continue\n                    img_id = line[0]\n                    if last_img_id is None:\n                        last_img_id = img_id\n                    label_id = line[2]\n                    assert label_id in self.label_id_mapping\n                    label = int(self.label_id_mapping[label_id])\n                    bbox = [\n                        float(line[4]),  # xmin\n                        float(line[6]),  # ymin\n                        float(line[5]),  # xmax\n                        float(line[7])  # ymax\n                    ]\n                    is_occluded = True if int(line[8]) == 1 else False\n                    is_truncated = True if int(line[9]) == 1 else False\n                   
 is_group_of = True if int(line[10]) == 1 else False\n                    is_depiction = True if int(line[11]) == 1 else False\n                    is_inside = True if int(line[12]) == 1 else False\n\n                    instance = dict(\n                        bbox=bbox,\n                        bbox_label=label,\n                        ignore_flag=0,\n                        is_occluded=is_occluded,\n                        is_truncated=is_truncated,\n                        is_group_of=is_group_of,\n                        is_depiction=is_depiction,\n                        is_inside=is_inside)\n                    last_img_path = osp.join(self.data_prefix['img'],\n                                             f'{last_img_id}.jpg')\n                    if img_id != last_img_id:\n                        # switch to a new image, record previous image's data.\n                        data_info = dict(\n                            img_path=last_img_path,\n                            img_id=last_img_id,\n                            instances=instances,\n                        )\n                        data_list.append(data_info)\n                        instances = []\n                    instances.append(instance)\n                    last_img_id = img_id\n                data_list.append(\n                    dict(\n                        img_path=last_img_path,\n                        img_id=last_img_id,\n                        instances=instances,\n                    ))\n\n        # add image metas to data list\n        img_metas = load(\n            self.meta_file,\n            file_format='pkl',\n            file_client_args=self.file_client_args)\n        assert len(img_metas) == len(data_list)\n        for i, meta in enumerate(img_metas):\n            img_id = data_list[i]['img_id']\n            assert f'{img_id}.jpg' == osp.split(meta['filename'])[-1]\n            h, w = meta['ori_shape'][:2]\n            data_list[i]['height'] = h\n            data_list[i]['width'] = w\n            # denormalize bboxes\n            for j in range(len(data_list[i]['instances'])):\n                data_list[i]['instances'][j]['bbox'][0] *= w\n                data_list[i]['instances'][j]['bbox'][2] *= w\n                data_list[i]['instances'][j]['bbox'][1] *= h\n                data_list[i]['instances'][j]['bbox'][3] *= h\n            # add image-level annotation\n            if img_level_anns is not None:\n                img_labels = []\n                confidences = []\n                img_ann_list = img_level_anns.get(img_id, [])\n                for ann in img_ann_list:\n                    img_labels.append(int(ann['image_level_label']))\n                    confidences.append(float(ann['confidence']))\n                data_list[i]['image_level_labels'] = np.array(\n                    img_labels, dtype=np.int64)\n                data_list[i]['confidences'] = np.array(\n                    confidences, dtype=np.float32)\n        return data_list\n\n    def _parse_label_file(self, label_file: str) -> tuple:\n        \"\"\"Get classes name and index mapping from cls-label-description file.\n\n        Args:\n            label_file (str): File path of the label description file that\n                maps the classes names in MID format to their short\n                descriptions.\n\n        Returns:\n            tuple: Class name of OpenImages.\n        \"\"\"\n\n        index_list = []\n        classes_names = []\n        with self.file_client.get_local_path(label_file) as local_path:\n  
          with open(local_path, 'r') as f:\n                reader = csv.reader(f)\n                for line in reader:\n                    # self.cat2label[line[0]] = line[1]\n                    classes_names.append(line[1])\n                    index_list.append(line[0])\n        index_mapping = {index: i for i, index in enumerate(index_list)}\n        return classes_names, index_mapping\n\n    def _parse_img_level_ann(self,\n                             img_level_ann_file: str) -> Dict[str, List[dict]]:\n        \"\"\"Parse image level annotations from csv style ann_file.\n\n        Args:\n            img_level_ann_file (str): CSV style image level annotation\n                file path.\n\n        Returns:\n            Dict[str, List[dict]]: Annotations where item of the defaultdict\n            indicates an image, each of which has (n) dicts.\n            Keys of dicts are:\n\n                - `image_level_label` (int): Label id.\n                - `confidence` (float): Labels that are human-verified to be\n                  present in an image have confidence = 1 (positive labels).\n                  Labels that are human-verified to be absent from an image\n                  have confidence = 0 (negative labels). Machine-generated\n                  labels have fractional confidences, generally >= 0.5.\n                  The higher the confidence, the smaller the chance for\n                  the label to be a false positive.\n        \"\"\"\n\n        item_lists = defaultdict(list)\n        with self.file_client.get_local_path(img_level_ann_file) as local_path:\n            with open(local_path, 'r') as f:\n                reader = csv.reader(f)\n                for i, line in enumerate(reader):\n                    if i == 0:\n                        continue\n                    img_id = line[0]\n                    item_lists[img_id].append(\n                        dict(\n                            image_level_label=int(\n                                self.label_id_mapping[line[2]]),\n                            confidence=float(line[3])))\n        return item_lists\n\n    def _get_relation_matrix(self, hierarchy_file: str) -> np.ndarray:\n        \"\"\"Get the matrix of class hierarchy from the hierarchy file. 
Hierarchy\n        for 600 classes can be found at https://storage.googleapis.com/openimag\n        es/2018_04/bbox_labels_600_hierarchy_visualizer/circle.html.\n\n        Args:\n            hierarchy_file (str): File path to the hierarchy for classes.\n\n        Returns:\n            np.ndarray: The matrix of the corresponding relationship between\n            the parent class and the child class, of shape\n            (class_num, class_num).\n        \"\"\"  # noqa\n\n        hierarchy = load(\n            hierarchy_file,\n            file_format='json',\n            file_client_args=self.file_client_args)\n        class_num = len(self._metainfo['classes'])\n        relation_matrix = np.eye(class_num, class_num)\n        relation_matrix = self._convert_hierarchy_tree(hierarchy,\n                                                       relation_matrix)\n        return relation_matrix\n\n    def _convert_hierarchy_tree(self,\n                                hierarchy_map: dict,\n                                relation_matrix: np.ndarray,\n                                parents: list = [],\n                                get_all_parents: bool = True) -> np.ndarray:\n        \"\"\"Get matrix of the corresponding relationship between the parent\n        class and the child class.\n\n        Args:\n            hierarchy_map (dict): Including label name and corresponding\n                subcategory. Keys of dicts are:\n\n                - `LabeName` (str): Name of the label.\n                - `Subcategory` (dict | list): Corresponding subcategory(ies).\n            relation_matrix (ndarray): The matrix of the corresponding\n                relationship between the parent class and the child class,\n                of shape (class_num, class_num).\n            parents (list): Corresponding parent class.\n            get_all_parents (bool): Whether get all parent names.\n                Default: True\n\n        Returns:\n            ndarray: The matrix of the corresponding relationship between\n            the parent class and the child class, of shape\n            (class_num, class_num).\n        \"\"\"\n\n        if 'Subcategory' in hierarchy_map:\n            for node in hierarchy_map['Subcategory']:\n                if 'LabelName' in node:\n                    children_name = node['LabelName']\n                    children_index = self.label_id_mapping[children_name]\n                    children = [children_index]\n                else:\n                    continue\n                if len(parents) > 0:\n                    for parent_index in parents:\n                        if get_all_parents:\n                            children.append(parent_index)\n                        relation_matrix[children_index, parent_index] = 1\n                relation_matrix = self._convert_hierarchy_tree(\n                    node, relation_matrix, parents=children)\n        return relation_matrix\n\n    def _join_prefix(self):\n        \"\"\"Join ``self.data_root`` with annotation path.\"\"\"\n        super()._join_prefix()\n        if not is_abs(self.label_file) and self.label_file:\n            self.label_file = osp.join(self.data_root, self.label_file)\n        if not is_abs(self.meta_file) and self.meta_file:\n            self.meta_file = osp.join(self.data_root, self.meta_file)\n        if not is_abs(self.hierarchy_file) and self.hierarchy_file:\n            self.hierarchy_file = osp.join(self.data_root, self.hierarchy_file)\n        if self.image_level_ann_file and not 
is_abs(self.image_level_ann_file):\n            self.image_level_ann_file = osp.join(self.data_root,\n                                                 self.image_level_ann_file)\n\n\n@DATASETS.register_module()\nclass OpenImagesChallengeDataset(OpenImagesDataset):\n    \"\"\"Open Images Challenge dataset for detection.\n\n    Args:\n        ann_file (str): Open Images Challenge box annotation in txt format.\n    \"\"\"\n\n    METAINFO: dict = dict(dataset_type='oid_challenge')\n\n    def __init__(self, ann_file: str, **kwargs) -> None:\n        if not ann_file.endswith('txt'):\n            raise TypeError('The annotation file of Open Images Challenge '\n                            'should be a txt file.')\n\n        super().__init__(ann_file=ann_file, **kwargs)\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"\n        classes_names, label_id_mapping = self._parse_label_file(\n            self.label_file)\n        self._metainfo['classes'] = classes_names\n        self.label_id_mapping = label_id_mapping\n\n        if self.image_level_ann_file is not None:\n            img_level_anns = self._parse_img_level_ann(\n                self.image_level_ann_file)\n        else:\n            img_level_anns = None\n\n        # OpenImagesMetric can get the relation matrix from the dataset meta\n        relation_matrix = self._get_relation_matrix(self.hierarchy_file)\n        self._metainfo['RELATION_MATRIX'] = relation_matrix\n\n        data_list = []\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            with open(local_path, 'r') as f:\n                lines = f.readlines()\n        i = 0\n        while i < len(lines):\n            instances = []\n            filename = lines[i].rstrip()\n            i += 2\n            img_gt_size = int(lines[i])\n            i += 1\n            for j in range(img_gt_size):\n                sp = lines[i + j].split()\n                instances.append(\n                    dict(\n                        bbox=[\n                            float(sp[1]),\n                            float(sp[2]),\n                            float(sp[3]),\n                            float(sp[4])\n                        ],\n                        bbox_label=int(sp[0]) - 1,  # labels begin from 1\n                        ignore_flag=0,\n                        is_group_ofs=True if int(sp[5]) == 1 else False))\n            i += img_gt_size\n            data_list.append(\n                dict(\n                    img_path=osp.join(self.data_prefix['img'], filename),\n                    instances=instances,\n                ))\n\n        # add image metas to data list\n        img_metas = load(\n            self.meta_file,\n            file_format='pkl',\n            file_client_args=self.file_client_args)\n        assert len(img_metas) == len(data_list)\n        for i, meta in enumerate(img_metas):\n            img_id = osp.split(data_list[i]['img_path'])[-1][:-4]\n            assert img_id == osp.split(meta['filename'])[-1][:-4]\n            h, w = meta['ori_shape'][:2]\n            data_list[i]['height'] = h\n            data_list[i]['width'] = w\n            data_list[i]['img_id'] = img_id\n            # denormalize bboxes\n            for j in range(len(data_list[i]['instances'])):\n                data_list[i]['instances'][j]['bbox'][0] *= w\n                
data_list[i]['instances'][j]['bbox'][2] *= w\n                data_list[i]['instances'][j]['bbox'][1] *= h\n                data_list[i]['instances'][j]['bbox'][3] *= h\n            # add image-level annotation\n            if img_level_anns is not None:\n                img_labels = []\n                confidences = []\n                img_ann_list = img_level_anns.get(img_id, [])\n                for ann in img_ann_list:\n                    img_labels.append(int(ann['image_level_label']))\n                    confidences.append(float(ann['confidence']))\n                data_list[i]['image_level_labels'] = np.array(\n                    img_labels, dtype=np.int64)\n                data_list[i]['confidences'] = np.array(\n                    confidences, dtype=np.float32)\n        return data_list\n\n    def _parse_label_file(self, label_file: str) -> tuple:\n        \"\"\"Get classes name and index mapping from cls-label-description file.\n\n        Args:\n            label_file (str): File path of the label description file that\n                maps the classes names in MID format to their short\n                descriptions.\n\n        Returns:\n            tuple: Class name of OpenImages.\n        \"\"\"\n        label_list = []\n        id_list = []\n        index_mapping = {}\n        with self.file_client.get_local_path(label_file) as local_path:\n            with open(local_path, 'r') as f:\n                reader = csv.reader(f)\n                for line in reader:\n                    label_name = line[0]\n                    label_id = int(line[2])\n                    label_list.append(line[1])\n                    id_list.append(label_id)\n                    index_mapping[label_name] = label_id - 1\n        indexes = np.argsort(id_list)\n        classes_names = []\n        for index in indexes:\n            classes_names.append(label_list[index])\n        return classes_names, index_mapping\n\n    def _parse_img_level_ann(self, image_level_ann_file):\n        \"\"\"Parse image level annotations from csv style ann_file.\n\n        Args:\n            image_level_ann_file (str): CSV style image level annotation\n                file path.\n\n        Returns:\n            defaultdict[list[dict]]: Annotations where item of the defaultdict\n            indicates an image, each of which has (n) dicts.\n            Keys of dicts are:\n\n                - `image_level_label` (int): of shape 1.\n                - `confidence` (float): of shape 1.\n        \"\"\"\n\n        item_lists = defaultdict(list)\n        with self.file_client.get_local_path(\n                image_level_ann_file) as local_path:\n            with open(local_path, 'r') as f:\n                reader = csv.reader(f)\n                i = -1\n                for line in reader:\n                    i += 1\n                    if i == 0:\n                        continue\n                    else:\n                        img_id = line[0]\n                        label_id = line[1]\n                        assert label_id in self.label_id_mapping\n                        image_level_label = int(\n                            self.label_id_mapping[label_id])\n                        confidence = float(line[2])\n                        item_lists[img_id].append(\n                            dict(\n                                image_level_label=image_level_label,\n                                confidence=confidence))\n        return item_lists\n\n    def _get_relation_matrix(self, hierarchy_file: str) -> 
np.ndarray:\n        \"\"\"Get the matrix of class hierarchy from the hierarchy file.\n\n        Args:\n            hierarchy_file (str): File path to the hierarchy for classes.\n\n        Returns:\n            np.ndarray: The matrix of the corresponding\n            relationship between the parent class and the child class,\n            of shape (class_num, class_num).\n        \"\"\"\n        with self.file_client.get_local_path(hierarchy_file) as local_path:\n            class_label_tree = np.load(local_path, allow_pickle=True)\n        return class_label_tree[1:, 1:]\n"
  },
  {
    "path": "mmdet/datasets/samplers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .batch_sampler import AspectRatioBatchSampler\nfrom .class_aware_sampler import ClassAwareSampler\nfrom .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler\n\n__all__ = [\n    'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',\n    'GroupMultiSourceSampler'\n]\n"
  },
  {
    "path": "mmdet/datasets/samplers/batch_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Sequence\n\nfrom torch.utils.data import BatchSampler, Sampler\n\nfrom mmdet.registry import DATA_SAMPLERS\n\n\n# TODO: maybe replace with a data_loader wrapper\n@DATA_SAMPLERS.register_module()\nclass AspectRatioBatchSampler(BatchSampler):\n    \"\"\"A sampler wrapper for grouping images with similar aspect ratio (< 1 or.\n\n    >= 1) into a same batch.\n\n    Args:\n        sampler (Sampler): Base sampler.\n        batch_size (int): Size of mini-batch.\n        drop_last (bool): If ``True``, the sampler will drop the last batch if\n            its size would be less than ``batch_size``.\n    \"\"\"\n\n    def __init__(self,\n                 sampler: Sampler,\n                 batch_size: int,\n                 drop_last: bool = False) -> None:\n        if not isinstance(sampler, Sampler):\n            raise TypeError('sampler should be an instance of ``Sampler``, '\n                            f'but got {sampler}')\n        if not isinstance(batch_size, int) or batch_size <= 0:\n            raise ValueError('batch_size should be a positive integer value, '\n                             f'but got batch_size={batch_size}')\n        self.sampler = sampler\n        self.batch_size = batch_size\n        self.drop_last = drop_last\n        # two groups for w < h and w >= h\n        self._aspect_ratio_buckets = [[] for _ in range(2)]\n\n    def __iter__(self) -> Sequence[int]:\n        for idx in self.sampler:\n            data_info = self.sampler.dataset.get_data_info(idx)\n            width, height = data_info['width'], data_info['height']\n            bucket_id = 0 if width < height else 1\n            bucket = self._aspect_ratio_buckets[bucket_id]\n            bucket.append(idx)\n            # yield a batch of indices in the same aspect ratio group\n            if len(bucket) == self.batch_size:\n                yield bucket[:]\n                del bucket[:]\n\n        # yield the rest data and reset the bucket\n        left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[\n            1]\n        self._aspect_ratio_buckets = [[] for _ in range(2)]\n        while len(left_data) > 0:\n            if len(left_data) <= self.batch_size:\n                if not self.drop_last:\n                    yield left_data[:]\n                left_data = []\n            else:\n                yield left_data[:self.batch_size]\n                left_data = left_data[self.batch_size:]\n\n    def __len__(self) -> int:\n        if self.drop_last:\n            return len(self.sampler) // self.batch_size\n        else:\n            return (len(self.sampler) + self.batch_size - 1) // self.batch_size\n"
  },
  {
    "path": "mmdet/datasets/samplers/class_aware_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Dict, Iterator, Optional, Union\n\nimport numpy as np\nimport torch\nfrom mmengine.dataset import BaseDataset\nfrom mmengine.dist import get_dist_info, sync_random_seed\nfrom torch.utils.data import Sampler\n\nfrom mmdet.registry import DATA_SAMPLERS\n\n\n@DATA_SAMPLERS.register_module()\nclass ClassAwareSampler(Sampler):\n    r\"\"\"Sampler that restricts data loading to the label of the dataset.\n\n    A class-aware sampling strategy to effectively tackle the\n    non-uniform class distribution. The length of the training data is\n    consistent with source data. Simple improvements based on `Relay\n    Backpropagation for Effective Learning of Deep Convolutional\n    Neural Networks <https://arxiv.org/abs/1512.05830>`_\n\n    The implementation logic is referred to\n    https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py\n\n    Args:\n        dataset: Dataset used for sampling.\n        seed (int, optional): random seed used to shuffle the sampler.\n            This number should be identical across all\n            processes in the distributed group. Defaults to None.\n        num_sample_class (int): The number of samples taken from each\n            per-label list. Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 dataset: BaseDataset,\n                 seed: Optional[int] = None,\n                 num_sample_class: int = 1) -> None:\n        rank, world_size = get_dist_info()\n        self.rank = rank\n        self.world_size = world_size\n\n        self.dataset = dataset\n        self.epoch = 0\n        # Must be the same across all workers. If None, will use a\n        # random seed shared among workers\n        # (require synchronization among all workers)\n        if seed is None:\n            seed = sync_random_seed()\n        self.seed = seed\n\n        # The number of samples taken from each per-label list\n        assert num_sample_class > 0 and isinstance(num_sample_class, int)\n        self.num_sample_class = num_sample_class\n        # Get per-label image list from dataset\n        self.cat_dict = self.get_cat2imgs()\n\n        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size))\n        self.total_size = self.num_samples * self.world_size\n\n        # get number of images containing each category\n        self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]\n        # filter labels without images\n        self.valid_cat_inds = [\n            i for i, length in enumerate(self.num_cat_imgs) if length != 0\n        ]\n        self.num_classes = len(self.valid_cat_inds)\n\n    def get_cat2imgs(self) -> Dict[int, list]:\n        \"\"\"Get a dict with class as key and img_ids as values.\n\n        Returns:\n            dict[int, list]: A dict of per-label image list,\n            the item of the dict indicates a label index,\n            corresponds to the image index that contains the label.\n        \"\"\"\n        classes = self.dataset.metainfo.get('classes', None)\n        if classes is None:\n            raise ValueError('dataset metainfo must contain `classes`')\n        # sort the label index\n        cat2imgs = {i: [] for i in range(len(classes))}\n        for i in range(len(self.dataset)):\n            cat_ids = set(self.dataset.get_cat_ids(i))\n            for cat in cat_ids:\n                cat2imgs[cat].append(i)\n        return cat2imgs\n\n    def __iter__(self) -> 
Iterator[int]:\n        # deterministically shuffle based on epoch\n        g = torch.Generator()\n        g.manual_seed(self.epoch + self.seed)\n\n        # initialize label list\n        label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)\n        # initialize each per-label image list\n        data_iter_dict = dict()\n        for i in self.valid_cat_inds:\n            data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)\n\n        def gen_cat_img_inds(cls_list, data_dict, num_sample_cls):\n            \"\"\"Traverse the categories and extract `num_sample_cls` image\n            indexes of the corresponding categories one by one.\"\"\"\n            id_indices = []\n            for _ in range(len(cls_list)):\n                cls_idx = next(cls_list)\n                for _ in range(num_sample_cls):\n                    id = next(data_dict[cls_idx])\n                    id_indices.append(id)\n            return id_indices\n\n        # deterministically shuffle based on epoch\n        num_bins = int(\n            math.ceil(self.total_size * 1.0 / self.num_classes /\n                      self.num_sample_class))\n        indices = []\n        for i in range(num_bins):\n            indices += gen_cat_img_inds(label_iter_list, data_iter_dict,\n                                        self.num_sample_class)\n\n        # fix extra samples to make it evenly divisible\n        if len(indices) >= self.total_size:\n            indices = indices[:self.total_size]\n        else:\n            indices += indices[:(self.total_size - len(indices))]\n        assert len(indices) == self.total_size\n\n        # subsample\n        offset = self.num_samples * self.rank\n        indices = indices[offset:offset + self.num_samples]\n        assert len(indices) == self.num_samples\n\n        return iter(indices)\n\n    def __len__(self) -> int:\n        \"\"\"The number of samples in this rank.\"\"\"\n        return self.num_samples\n\n    def set_epoch(self, epoch: int) -> None:\n        \"\"\"Sets the epoch for this sampler.\n\n        When :attr:`shuffle=True`, this ensures all replicas use a different\n        random ordering for each epoch. 
Otherwise, the next iteration of this\n        sampler will yield the same ordering.\n\n        Args:\n            epoch (int): Epoch number.\n        \"\"\"\n        self.epoch = epoch\n\n\nclass RandomCycleIter:\n    \"\"\"Shuffle the list and shuffle it again after the list has been traversed.\n\n    The implementation logic is referred to\n    https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py\n\n    Example:\n        >>> label_list = [0, 1, 2, 4, 5]\n        >>> g = torch.Generator()\n        >>> g.manual_seed(0)\n        >>> label_iter_list = RandomCycleIter(label_list, generator=g)\n        >>> index = next(label_iter_list)\n    Args:\n        data (list or ndarray): The data that needs to be shuffled.\n        generator: A torch.Generator object, which is used in setting the seed\n            for generating random numbers.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 data: Union[list, np.ndarray],\n                 generator: torch.Generator = None) -> None:\n        self.data = data\n        self.length = len(data)\n        self.index = torch.randperm(self.length, generator=generator).numpy()\n        self.i = 0\n        self.generator = generator\n\n    def __iter__(self) -> Iterator:\n        return self\n\n    def __len__(self) -> int:\n        return len(self.data)\n\n    def __next__(self):\n        if self.i == self.length:\n            self.index = torch.randperm(\n                self.length, generator=self.generator).numpy()\n            self.i = 0\n        idx = self.data[self.index[self.i]]\n        self.i += 1\n        return idx\n"
  },
  {
    "path": "mmdet/datasets/samplers/multi_source_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nfrom typing import Iterator, List, Optional, Sized, Union\n\nimport numpy as np\nimport torch\nfrom mmengine.dataset import BaseDataset\nfrom mmengine.dist import get_dist_info, sync_random_seed\nfrom torch.utils.data import Sampler\n\nfrom mmdet.registry import DATA_SAMPLERS\n\n\n@DATA_SAMPLERS.register_module()\nclass MultiSourceSampler(Sampler):\n    r\"\"\"Multi-Source Infinite Sampler.\n\n    According to the sampling ratio, sample data from different\n    datasets to form batches.\n\n    Args:\n        dataset (Sized): The dataset.\n        batch_size (int): Size of mini-batch.\n        source_ratio (list[int | float]): The sampling ratio of different\n            source datasets in a mini-batch.\n        shuffle (bool): Whether shuffle the dataset or not. Defaults to True.\n        seed (int, optional): Random seed. If None, set a random seed.\n            Defaults to None.\n\n    Examples:\n        >>> dataset_type = 'ConcatDataset'\n        >>> sub_dataset_type = 'CocoDataset'\n        >>> data_root = 'data/coco/'\n        >>> sup_ann = '../coco_semi_annos/instances_train2017.1@10.json'\n        >>> unsup_ann = '../coco_semi_annos/' \\\n        >>>             'instances_train2017.1@10-unlabeled.json'\n        >>> dataset = dict(type=dataset_type,\n        >>>     datasets=[\n        >>>         dict(\n        >>>             type=sub_dataset_type,\n        >>>             data_root=data_root,\n        >>>             ann_file=sup_ann,\n        >>>             data_prefix=dict(img='train2017/'),\n        >>>             filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        >>>             pipeline=sup_pipeline),\n        >>>         dict(\n        >>>             type=sub_dataset_type,\n        >>>             data_root=data_root,\n        >>>             ann_file=unsup_ann,\n        >>>             data_prefix=dict(img='train2017/'),\n        >>>             filter_cfg=dict(filter_empty_gt=True, min_size=32),\n        >>>             pipeline=unsup_pipeline),\n        >>>         ])\n        >>>     train_dataloader = dict(\n        >>>         batch_size=5,\n        >>>         num_workers=5,\n        >>>         persistent_workers=True,\n        >>>         sampler=dict(type='MultiSourceSampler',\n        >>>             batch_size=5, source_ratio=[1, 4]),\n        >>>         batch_sampler=None,\n        >>>         dataset=dataset)\n    \"\"\"\n\n    def __init__(self,\n                 dataset: Sized,\n                 batch_size: int,\n                 source_ratio: List[Union[int, float]],\n                 shuffle: bool = True,\n                 seed: Optional[int] = None) -> None:\n\n        assert hasattr(dataset, 'cumulative_sizes'),\\\n            f'The dataset must be ConcatDataset, but get {dataset}'\n        assert isinstance(batch_size, int) and batch_size > 0, \\\n            'batch_size must be a positive integer value, ' \\\n            f'but got batch_size={batch_size}'\n        assert isinstance(source_ratio, list), \\\n            f'source_ratio must be a list, but got source_ratio={source_ratio}'\n        assert len(source_ratio) == len(dataset.cumulative_sizes), \\\n            'The length of source_ratio must be equal to ' \\\n            f'the number of datasets, but got source_ratio={source_ratio}'\n\n        rank, world_size = get_dist_info()\n        self.rank = rank\n        self.world_size = world_size\n\n        self.dataset = dataset\n        
self.cumulative_sizes = [0] + dataset.cumulative_sizes\n        self.batch_size = batch_size\n        self.source_ratio = source_ratio\n\n        self.num_per_source = [\n            int(batch_size * sr / sum(source_ratio)) for sr in source_ratio\n        ]\n        self.num_per_source[0] = batch_size - sum(self.num_per_source[1:])\n\n        assert sum(self.num_per_source) == batch_size, \\\n            'The sum of num_per_source must be equal to ' \\\n            f'batch_size, but get {self.num_per_source}'\n\n        self.seed = sync_random_seed() if seed is None else seed\n        self.shuffle = shuffle\n        self.source2inds = {\n            source: self._indices_of_rank(len(ds))\n            for source, ds in enumerate(dataset.datasets)\n        }\n\n    def _infinite_indices(self, sample_size: int) -> Iterator[int]:\n        \"\"\"Infinitely yield a sequence of indices.\"\"\"\n        g = torch.Generator()\n        g.manual_seed(self.seed)\n        while True:\n            if self.shuffle:\n                yield from torch.randperm(sample_size, generator=g).tolist()\n            else:\n                yield from torch.arange(sample_size).tolist()\n\n    def _indices_of_rank(self, sample_size: int) -> Iterator[int]:\n        \"\"\"Slice the infinite indices by rank.\"\"\"\n        yield from itertools.islice(\n            self._infinite_indices(sample_size), self.rank, None,\n            self.world_size)\n\n    def __iter__(self) -> Iterator[int]:\n        batch_buffer = []\n        while True:\n            for source, num in enumerate(self.num_per_source):\n                batch_buffer_per_source = []\n                for idx in self.source2inds[source]:\n                    idx += self.cumulative_sizes[source]\n                    batch_buffer_per_source.append(idx)\n                    if len(batch_buffer_per_source) == num:\n                        batch_buffer += batch_buffer_per_source\n                        break\n            yield from batch_buffer\n            batch_buffer = []\n\n    def __len__(self) -> int:\n        return len(self.dataset)\n\n    def set_epoch(self, epoch: int) -> None:\n        \"\"\"Not supported in `epoch-based runner.\"\"\"\n        pass\n\n\n@DATA_SAMPLERS.register_module()\nclass GroupMultiSourceSampler(MultiSourceSampler):\n    r\"\"\"Group Multi-Source Infinite Sampler.\n\n    According to the sampling ratio, sample data from different\n    datasets but the same group to form batches.\n\n    Args:\n        dataset (Sized): The dataset.\n        batch_size (int): Size of mini-batch.\n        source_ratio (list[int | float]): The sampling ratio of different\n            source datasets in a mini-batch.\n        shuffle (bool): Whether shuffle the dataset or not. Defaults to True.\n        seed (int, optional): Random seed. 
If None, set a random seed.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 dataset: BaseDataset,\n                 batch_size: int,\n                 source_ratio: List[Union[int, float]],\n                 shuffle: bool = True,\n                 seed: Optional[int] = None) -> None:\n        super().__init__(\n            dataset=dataset,\n            batch_size=batch_size,\n            source_ratio=source_ratio,\n            shuffle=shuffle,\n            seed=seed)\n\n        self._get_source_group_info()\n        self.group_source2inds = [{\n            source:\n            self._indices_of_rank(self.group2size_per_source[source][group])\n            for source in range(len(dataset.datasets))\n        } for group in range(len(self.group_ratio))]\n\n    def _get_source_group_info(self) -> None:\n        self.group2size_per_source = [{0: 0, 1: 0}, {0: 0, 1: 0}]\n        self.group2inds_per_source = [{0: [], 1: []}, {0: [], 1: []}]\n        for source, dataset in enumerate(self.dataset.datasets):\n            for idx in range(len(dataset)):\n                data_info = dataset.get_data_info(idx)\n                width, height = data_info['width'], data_info['height']\n                group = 0 if width < height else 1\n                self.group2size_per_source[source][group] += 1\n                self.group2inds_per_source[source][group].append(idx)\n\n        self.group_sizes = np.zeros(2, dtype=np.int64)\n        for group2size in self.group2size_per_source:\n            for group, size in group2size.items():\n                self.group_sizes[group] += size\n        self.group_ratio = self.group_sizes / sum(self.group_sizes)\n\n    def __iter__(self) -> Iterator[int]:\n        batch_buffer = []\n        while True:\n            group = np.random.choice(\n                list(range(len(self.group_ratio))), p=self.group_ratio)\n            for source, num in enumerate(self.num_per_source):\n                batch_buffer_per_source = []\n                for idx in self.group_source2inds[group][source]:\n                    idx = self.group2inds_per_source[source][group][\n                        idx] + self.cumulative_sizes[source]\n                    batch_buffer_per_source.append(idx)\n                    if len(batch_buffer_per_source) == num:\n                        batch_buffer += batch_buffer_per_source\n                        break\n            yield from batch_buffer\n            batch_buffer = []\n"
  },
  {
    "path": "mmdet/datasets/transforms/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .augment_wrappers import AutoAugment, RandAugment\nfrom .colorspace import (AutoContrast, Brightness, Color, ColorTransform,\n                         Contrast, Equalize, Invert, Posterize, Sharpness,\n                         Solarize, SolarizeAdd)\nfrom .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose\nfrom .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,\n                        TranslateY)\nfrom .instaboost import InstaBoost\nfrom .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,\n                      LoadEmptyAnnotations, LoadImageFromNDArray,\n                      LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,\n                      LoadProposals)\nfrom .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,\n                         Expand, FixShapeResize, MinIoURandomCrop, MixUp,\n                         Mosaic, Pad, PhotoMetricDistortion, RandomAffine,\n                         RandomCenterCropPad, RandomCrop, RandomErasing,\n                         RandomFlip, RandomShift, Resize, SegRescale,\n                         YOLOXHSVRandomAug)\nfrom .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder\n\n__all__ = [\n    'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',\n    'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',\n    'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',\n    'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',\n    'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',\n    'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',\n    'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',\n    'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',\n    'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',\n    'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',\n    'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',\n    'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',\n    'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader'\n]\n"
  },
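All transforms exported above are registered in `TRANSFORMS` and are usually assembled from config dicts, but they can also be chained directly, which is handy for quick checks. A minimal sketch on a hand-built results dict (assuming mmdet and its mmcv dependency are installed; the image and box values are made up):

import numpy as np

from mmdet.datasets.transforms import RandomFlip, Resize

# a toy results dict mimicking the output of the loading transforms
results = dict(
    img=np.zeros((240, 320, 3), dtype=np.uint8),
    gt_bboxes=np.array([[10., 10., 60., 80.]], dtype=np.float32),
)
# rescale while keeping the aspect ratio, then always flip horizontally
for t in [Resize(scale=(640, 480), keep_ratio=True), RandomFlip(prob=1.0)]:
    results = t(results)
print(results['img'].shape, results['scale_factor'], results['flip'])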
  {
    "path": "mmdet/datasets/transforms/augment_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Union\n\nimport numpy as np\nfrom mmcv.transforms import RandomChoice\nfrom mmcv.transforms.utils import cache_randomness\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.registry import TRANSFORMS\n\n# AutoAugment uses reinforcement learning to search for\n# some widely useful data augmentation strategies,\n# here we provide AUTOAUG_POLICIES_V0.\n# For AUTOAUG_POLICIES_V0, each tuple is an augmentation\n# operation of the form (operation, probability, magnitude).\n# Each element in policies is a policy that will be applied\n# sequentially on the image.\n\n# RandAugment defines a data augmentation search space, RANDAUG_SPACE,\n# sampling 1~3 data augmentations each time, and\n# setting the magnitude of each data augmentation randomly,\n# which will be applied sequentially on the image.\n\n_MAX_LEVEL = 10\n\nAUTOAUG_POLICIES_V0 = [\n    [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],\n    [('Color', 0.4, 9), ('Equalize', 0.6, 3)],\n    [('Color', 0.4, 1), ('Rotate', 0.6, 8)],\n    [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],\n    [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],\n    [('Color', 0.2, 0), ('Equalize', 0.8, 8)],\n    [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],\n    [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],\n    [('Color', 0.6, 1), ('Equalize', 1.0, 2)],\n    [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],\n    [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],\n    [('Color', 0.4, 7), ('Equalize', 0.6, 0)],\n    [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],\n    [('Solarize', 0.6, 8), ('Color', 0.6, 9)],\n    [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],\n    [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],\n    [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],\n    [('ShearY', 0.8, 0), ('Color', 0.6, 4)],\n    [('Color', 1.0, 0), ('Rotate', 0.6, 2)],\n    [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],\n    [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],\n    [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],\n    [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],\n    [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],\n    [('Color', 0.8, 6), ('Rotate', 0.4, 5)],\n]\n\n\ndef policies_v0():\n    \"\"\"Autoaugment policies that was used in AutoAugment Paper.\"\"\"\n    policies = list()\n    for policy_args in AUTOAUG_POLICIES_V0:\n        policy = list()\n        for args in policy_args:\n            policy.append(dict(type=args[0], prob=args[1], level=args[2]))\n        policies.append(policy)\n    return policies\n\n\nRANDAUG_SPACE = [[dict(type='AutoContrast')], [dict(type='Equalize')],\n                 [dict(type='Invert')], [dict(type='Rotate')],\n                 [dict(type='Posterize')], [dict(type='Solarize')],\n                 [dict(type='SolarizeAdd')], [dict(type='Color')],\n                 [dict(type='Contrast')], [dict(type='Brightness')],\n                 [dict(type='Sharpness')], [dict(type='ShearX')],\n                 [dict(type='ShearY')], [dict(type='TranslateX')],\n                 [dict(type='TranslateY')]]\n\n\ndef level_to_mag(level: Optional[int], min_mag: float,\n                 max_mag: float) -> float:\n    \"\"\"Map from level to magnitude.\"\"\"\n    if level is None:\n        return round(np.random.rand() * (max_mag - min_mag) + min_mag, 1)\n    else:\n        return round(level / _MAX_LEVEL * (max_mag - min_mag) + min_mag, 1)\n\n\n@TRANSFORMS.register_module()\nclass AutoAugment(RandomChoice):\n    \"\"\"Auto augmentation.\n\n    This data augmentation is proposed in 
`AutoAugment: Learning\n    Augmentation Policies from Data <https://arxiv.org/abs/1805.09501>`_\n    and in `Learning Data Augmentation Strategies for Object Detection\n    <https://arxiv.org/pdf/1906.11172>`_.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_bboxes_labels\n    - gt_masks\n    - gt_ignore_flags\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        policies (List[List[Union[dict, ConfigDict]]]):\n            The policies of auto augmentation.Each policy in ``policies``\n            is a specific augmentation policy, and is composed by several\n            augmentations. When AutoAugment is called, a random policy in\n            ``policies`` will be selected to augment images.\n            Defaults to policy_v0().\n        prob (list[float], optional): The probabilities associated\n            with each policy. The length should be equal to the policy\n            number and the sum should be 1. If not given, a uniform\n            distribution will be assumed. Defaults to None.\n\n    Examples:\n        >>> policies = [\n        >>>     [\n        >>>         dict(type='Sharpness', prob=0.0, level=8),\n        >>>         dict(type='ShearX', prob=0.4, level=0,)\n        >>>     ],\n        >>>     [\n        >>>         dict(type='Rotate', prob=0.6, level=10),\n        >>>         dict(type='Color', prob=1.0, level=6)\n        >>>     ]\n        >>> ]\n        >>> augmentation = AutoAugment(policies)\n        >>> img = np.ones(100, 100, 3)\n        >>> gt_bboxes = np.ones(10, 4)\n        >>> results = dict(img=img, gt_bboxes=gt_bboxes)\n        >>> results = augmentation(results)\n    \"\"\"\n\n    def __init__(self,\n                 policies: List[List[Union[dict, ConfigDict]]] = policies_v0(),\n                 prob: Optional[List[float]] = None) -> None:\n        assert isinstance(policies, list) and len(policies) > 0, \\\n            'Policies must be a non-empty list.'\n        for policy in policies:\n            assert isinstance(policy, list) and len(policy) > 0, \\\n                'Each policy in policies must be a non-empty list.'\n            for augment in policy:\n                assert isinstance(augment, dict) and 'type' in augment, \\\n                    'Each specific augmentation must be a dict with key' \\\n                    ' \"type\".'\n        super().__init__(transforms=policies, prob=prob)\n        self.policies = policies\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(policies={self.policies}, ' \\\n               f'prob={self.prob})'\n\n\n@TRANSFORMS.register_module()\nclass RandAugment(RandomChoice):\n    \"\"\"Rand augmentation.\n\n    This data augmentation is proposed in `RandAugment:\n    Practical automated data augmentation with a reduced\n    search space <https://arxiv.org/abs/1909.13719>`_.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_bboxes_labels\n    - gt_masks\n    - 
gt_ignore_flags\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        aug_space (List[List[Union[dict, ConfigDict]]]): The augmentation space\n            of rand augmentation. Each augmentation transform in ``aug_space``\n            is a specific transform, and is composed of several augmentations.\n            When RandAugment is called, a random transform in ``aug_space``\n            will be selected to augment images. Defaults to ``RANDAUG_SPACE``.\n        aug_num (int): Number of augmentations to apply sequentially.\n            Defaults to 2.\n        prob (list[float], optional): The probabilities associated with\n            each augmentation. The length should be equal to the length of\n            the augmentation space and the sum should be 1. If not given,\n            a uniform distribution will be assumed. Defaults to None.\n\n    Examples:\n        >>> aug_space = [\n        >>>     [dict(type='Sharpness')],\n        >>>     [dict(type='ShearX')],\n        >>>     [dict(type='Color')],\n        >>> ]\n        >>> augmentation = RandAugment(aug_space)\n        >>> img = np.ones((100, 100, 3))\n        >>> gt_bboxes = np.ones((10, 4))\n        >>> results = dict(img=img, gt_bboxes=gt_bboxes)\n        >>> results = augmentation(results)\n    \"\"\"\n\n    def __init__(self,\n                 aug_space: List[Union[dict, ConfigDict]] = RANDAUG_SPACE,\n                 aug_num: int = 2,\n                 prob: Optional[List[float]] = None) -> None:\n        assert isinstance(aug_space, list) and len(aug_space) > 0, \\\n            'Augmentation space must be a non-empty list.'\n        for aug in aug_space:\n            assert isinstance(aug, list) and len(aug) == 1, \\\n                'Each augmentation in aug_space must be a single-element list.'\n            for transform in aug:\n                assert isinstance(transform, dict) and 'type' in transform, \\\n                    'Each specific transform must be a dict with key' \\\n                    ' \"type\".'\n        super().__init__(transforms=aug_space, prob=prob)\n        self.aug_space = aug_space\n        self.aug_num = aug_num\n\n    @cache_randomness\n    def random_pipeline_index(self):\n        indices = np.arange(len(self.transforms))\n        return np.random.choice(\n            indices, self.aug_num, p=self.prob, replace=False)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to use RandAugment.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with RandAugment.\n        \"\"\"\n        for idx in self.random_pipeline_index():\n            results = self.transforms[idx](results)\n        return results\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(' \\\n               f'aug_space={self.aug_space}, '\\\n               f'aug_num={self.aug_num}, ' \\\n               f'prob={self.prob})'\n"
  },
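A quick, self-contained check of the helpers defined above (assuming mmdet is installed): `level_to_mag` maps an integer level linearly onto `[min_mag, max_mag]`, and `RandAugment` draws `aug_num` transforms per call from its search space:

from mmdet.datasets.transforms import RandAugment
from mmdet.datasets.transforms.augment_wrappers import level_to_mag

# level 5 out of _MAX_LEVEL = 10 lands halfway between min_mag and max_mag
print(level_to_mag(5, 0.1, 1.9))     # 1.0
print(level_to_mag(None, 0.1, 1.9))  # random magnitude in [0.1, 1.9]

aug = RandAugment()  # default RANDAUG_SPACE, two transforms per call
print(len(aug.transforms), aug.aug_num)  # 15 2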
  {
    "path": "mmdet/datasets/transforms/colorspace.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Optional\n\nimport mmcv\nimport numpy as np\nfrom mmcv.transforms import BaseTransform\nfrom mmcv.transforms.utils import cache_randomness\n\nfrom mmdet.registry import TRANSFORMS\nfrom .augment_wrappers import _MAX_LEVEL, level_to_mag\n\n\n@TRANSFORMS.register_module()\nclass ColorTransform(BaseTransform):\n    \"\"\"Base class for color transformations. All color transformations need to\n    inherit from this base class. ``ColorTransform`` unifies the class\n    attributes and class functions of color transformations (Color, Brightness,\n    Contrast, Sharpness, Solarize, SolarizeAdd, Equalize, AutoContrast, Invert,\n    and Posterize), and only distort color channels, without impacting the\n    locations of the instances.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing the geometric\n            transformation and should be in range [0, 1]. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for color transformation.\n            Defaults to 0.1.\n        max_mag (float): The maximum magnitude for color transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.1,\n                 max_mag: float = 1.9) -> None:\n        assert 0 <= prob <= 1.0, f'The probability of the transformation ' \\\n                                 f'should be in range [0,1], got {prob}.'\n        assert level is None or isinstance(level, int), \\\n            f'The level should be None or type int, got {type(level)}.'\n        assert level is None or 0 <= level <= _MAX_LEVEL, \\\n            f'The level should be in range [0,{_MAX_LEVEL}], got {level}.'\n        assert isinstance(min_mag, float), \\\n            f'min_mag should be type float, got {type(min_mag)}.'\n        assert isinstance(max_mag, float), \\\n            f'max_mag should be type float, got {type(max_mag)}.'\n        assert min_mag <= max_mag, \\\n            f'min_mag should smaller than max_mag, ' \\\n            f'got min_mag={min_mag} and max_mag={max_mag}'\n        self.prob = prob\n        self.level = level\n        self.min_mag = min_mag\n        self.max_mag = max_mag\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Transform the image.\"\"\"\n        pass\n\n    @cache_randomness\n    def _random_disable(self):\n        \"\"\"Randomly disable the transform.\"\"\"\n        return np.random.rand() > self.prob\n\n    @cache_randomness\n    def _get_mag(self):\n        \"\"\"Get the magnitude of the transform.\"\"\"\n        return level_to_mag(self.level, self.min_mag, self.max_mag)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function for images.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Transformed results.\n        \"\"\"\n\n        if self._random_disable():\n            return results\n        mag = self._get_mag()\n        self._transform_img(results, mag)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        
repr_str += f'(prob={self.prob}, '\n        repr_str += f'level={self.level}, '\n        repr_str += f'min_mag={self.min_mag}, '\n        repr_str += f'max_mag={self.max_mag})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass Color(ColorTransform):\n    \"\"\"Adjust the color balance of the image, in a manner similar to the\n    controls on a colour TV set. A magnitude=0 gives a black & white image,\n    whereas magnitude=1 gives the original image. The bboxes, masks and\n    segmentations are not modified.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Color transformation.\n            Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Color transformation.\n            Defaults to 0.1.\n        max_mag (float): The maximum magnitude for Color transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.1,\n                 max_mag: float = 1.9) -> None:\n        assert 0. <= min_mag <= 2.0, \\\n            f'min_mag for Color should be in range [0,2], got {min_mag}.'\n        assert 0. <= max_mag <= 2.0, \\\n            f'max_mag for Color should be in range [0,2], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Apply Color transformation to image.\"\"\"\n        # NOTE defaultly the image should be BGR format\n        img = results['img']\n        results['img'] = mmcv.adjust_color(img, mag).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Brightness(ColorTransform):\n    \"\"\"Adjust the brightness of the image. A magnitude=0 gives a black image,\n    whereas magnitude=1 gives the original image. The bboxes, masks and\n    segmentations are not modified.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Brightness transformation.\n            Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Brightness transformation.\n            Defaults to 0.1.\n        max_mag (float): The maximum magnitude for Brightness transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.1,\n                 max_mag: float = 1.9) -> None:\n        assert 0. <= min_mag <= 2.0, \\\n            f'min_mag for Brightness should be in range [0,2], got {min_mag}.'\n        assert 0. 
<= max_mag <= 2.0, \\\n            f'max_mag for Brightness should be in range [0,2], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Adjust the brightness of image.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.adjust_brightness(img, mag).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Contrast(ColorTransform):\n    \"\"\"Control the contrast of the image. A magnitude=0 gives a gray image,\n    whereas magnitude=1 gives the original imageThe bboxes, masks and\n    segmentations are not modified.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Contrast transformation.\n            Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Contrast transformation.\n            Defaults to 0.1.\n        max_mag (float): The maximum magnitude for Contrast transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.1,\n                 max_mag: float = 1.9) -> None:\n        assert 0. <= min_mag <= 2.0, \\\n            f'min_mag for Contrast should be in range [0,2], got {min_mag}.'\n        assert 0. <= max_mag <= 2.0, \\\n            f'max_mag for Contrast should be in range [0,2], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Adjust the image contrast.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.adjust_contrast(img, mag).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Sharpness(ColorTransform):\n    \"\"\"Adjust images sharpness. A positive magnitude would enhance the\n    sharpness and a negative magnitude would make the image blurry. A\n    magnitude=0 gives the origin img.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Sharpness transformation.\n            Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Sharpness transformation.\n            Defaults to 0.1.\n        max_mag (float): The maximum magnitude for Sharpness transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.1,\n                 max_mag: float = 1.9) -> None:\n        assert 0. <= min_mag <= 2.0, \\\n            f'min_mag for Sharpness should be in range [0,2], got {min_mag}.'\n        assert 0. 
<= max_mag <= 2.0, \\\n            f'max_mag for Sharpness should be in range [0,2], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Adjust the image sharpness.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.adjust_sharpness(img, mag).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Solarize(ColorTransform):\n    \"\"\"Solarize images (Invert all pixels above a threshold value of\n    magnitude.).\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Solarize transformation.\n            Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Solarize transformation.\n            Defaults to 0.0.\n        max_mag (float): The maximum magnitude for Solarize transformation.\n            Defaults to 256.0.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 256.0) -> None:\n        assert 0. <= min_mag <= 256.0, f'min_mag for Solarize should be ' \\\n                                       f'in range [0, 256], got {min_mag}.'\n        assert 0. <= max_mag <= 256.0, f'max_mag for Solarize should be ' \\\n                                       f'in range [0, 256], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Invert all pixel values above magnitude.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.solarize(img, mag).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass SolarizeAdd(ColorTransform):\n    \"\"\"SolarizeAdd images. For each pixel in the image that is less than 128,\n    add an additional amount to it decided by the magnitude.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing SolarizeAdd\n            transformation. Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for SolarizeAdd transformation.\n            Defaults to 0.0.\n        max_mag (float): The maximum magnitude for SolarizeAdd transformation.\n            Defaults to 110.0.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 110.0) -> None:\n        assert 0. <= min_mag <= 110.0, f'min_mag for SolarizeAdd should be ' \\\n                                       f'in range [0, 110], got {min_mag}.'\n        assert 0. 
<= max_mag <= 110.0, f'max_mag for SolarizeAdd should be ' \\\n                                       f'in range [0, 110], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"SolarizeAdd the image.\"\"\"\n        img = results['img']\n        img_solarized = np.where(img < 128, np.minimum(img + mag, 255), img)\n        results['img'] = img_solarized.astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Posterize(ColorTransform):\n    \"\"\"Posterize images (reduce the number of bits for each color channel).\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Posterize\n            transformation. Defaults to 1.0.\n        level (int, optional): Should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for Posterize transformation.\n            Defaults to 0.0.\n        max_mag (float): The maximum magnitude for Posterize transformation.\n            Defaults to 4.0.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 4.0) -> None:\n        assert 0. <= min_mag <= 8.0, f'min_mag for Posterize should be ' \\\n                                     f'in range [0, 8], got {min_mag}.'\n        assert 0. <= max_mag <= 8.0, f'max_mag for Posterize should be ' \\\n                                     f'in range [0, 8], got {max_mag}.'\n        super().__init__(\n            prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Posterize the image.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.posterize(img, math.ceil(mag)).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Equalize(ColorTransform):\n    \"\"\"Equalize the image histogram. The bboxes, masks and segmentations are\n    not modified.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Equalize transformation.\n            Defaults to 1.0.\n        level (int, optional): No use for Equalize transformation.\n            Defaults to None.\n        min_mag (float): No use for Equalize transformation. Defaults to 0.1.\n        max_mag (float): No use for Equalize transformation. Defaults to 1.9.\n    \"\"\"\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Equalizes the histogram of one image.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.imequalize(img).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass AutoContrast(ColorTransform):\n    \"\"\"Auto adjust image contrast.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing AutoContrast should\n             be in range [0, 1]. 
Defaults to 1.0.\n        level (int, optional): No use for AutoContrast transformation.\n            Defaults to None.\n        min_mag (float): No use for AutoContrast transformation.\n            Defaults to 0.1.\n        max_mag (float): No use for AutoContrast transformation.\n            Defaults to 1.9.\n    \"\"\"\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Auto adjust image contrast.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.auto_contrast(img).astype(img.dtype)\n\n\n@TRANSFORMS.register_module()\nclass Invert(ColorTransform):\n    \"\"\"Invert images.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        prob (float): The probability for performing Invert transformation and\n            should be in range [0, 1]. Defaults to 1.0.\n        level (int, optional): No use for Invert transformation.\n            Defaults to None.\n        min_mag (float): No use for Invert transformation. Defaults to 0.1.\n        max_mag (float): No use for Invert transformation. Defaults to 1.9.\n    \"\"\"\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Invert the image.\"\"\"\n        img = results['img']\n        results['img'] = mmcv.iminvert(img).astype(img.dtype)\n"
  },
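With `prob=1.0` and a fixed `level`, the color transforms above are deterministic, which makes them easy to sanity-check. A minimal sketch using `Brightness`, where level 10 maps to the default `max_mag=1.9`:

import numpy as np

from mmdet.datasets.transforms import Brightness

img = np.full((4, 4, 3), 100, dtype=np.uint8)
results = Brightness(prob=1.0, level=10)(dict(img=img))
# every pixel is scaled by the magnitude 1.9: 100 -> 190
print(results['img'][0, 0, 0])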
  {
    "path": "mmdet/datasets/transforms/formatting.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nfrom mmcv.transforms import to_tensor\nfrom mmcv.transforms.base import BaseTransform\nfrom mmengine.structures import InstanceData, PixelData\n\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.bbox import BaseBoxes\n\n\n@TRANSFORMS.register_module()\nclass PackDetInputs(BaseTransform):\n    \"\"\"Pack the inputs data for the detection / semantic segmentation /\n    panoptic segmentation.\n\n    The ``img_meta`` item is always populated.  The contents of the\n    ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n        - ``img_id``: id of the image\n\n        - ``img_path``: path to the image file\n\n        - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n        - ``img_shape``: shape of the image input to the network as a tuple \\\n            (h, w).  Note that images may be zero padded on the \\\n            bottom/right if the batch tensor is larger than this shape.\n\n        - ``scale_factor``: a float indicating the preprocessing scale\n\n        - ``flip``: a boolean indicating if image flip transform was used\n\n        - ``flip_direction``: the flipping direction\n\n    Args:\n        meta_keys (Sequence[str], optional): Meta keys to be converted to\n            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n            Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n            'scale_factor', 'flip', 'flip_direction')``\n    \"\"\"\n    mapping_table = {\n        'gt_bboxes': 'bboxes',\n        'gt_bboxes_labels': 'labels',\n        'gt_masks': 'masks'\n    }\n\n    def __init__(self,\n                 meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                            'scale_factor', 'flip', 'flip_direction')):\n        self.meta_keys = meta_keys\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Method to pack the input data.\n\n        Args:\n            results (dict): Result dict from the data pipeline.\n\n        Returns:\n            dict:\n\n            - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n            - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n                sample.\n        \"\"\"\n        packed_results = dict()\n        if 'img' in results:\n            img = results['img']\n            if len(img.shape) < 3:\n                img = np.expand_dims(img, -1)\n            # To improve the computational speed by by 3-5 times, apply:\n            # If image is not contiguous, use\n            # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n            # If image is already contiguous, use\n            # `torch.permute()` followed by `torch.contiguous()`\n            # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n            # for more details\n            if not img.flags.c_contiguous:\n                img = np.ascontiguousarray(img.transpose(2, 0, 1))\n                img = to_tensor(img)\n            else:\n                img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n            packed_results['inputs'] = img\n\n        if 'gt_ignore_flags' in results:\n            valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n            ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n        data_sample = DetDataSample()\n        instance_data = InstanceData()\n        ignore_instance_data = InstanceData()\n\n        for 
key in self.mapping_table.keys():\n            if key not in results:\n                continue\n            if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n                if 'gt_ignore_flags' in results:\n                    instance_data[\n                        self.mapping_table[key]] = results[key][valid_idx]\n                    ignore_instance_data[\n                        self.mapping_table[key]] = results[key][ignore_idx]\n                else:\n                    instance_data[self.mapping_table[key]] = results[key]\n            else:\n                if 'gt_ignore_flags' in results:\n                    instance_data[self.mapping_table[key]] = to_tensor(\n                        results[key][valid_idx])\n                    ignore_instance_data[self.mapping_table[key]] = to_tensor(\n                        results[key][ignore_idx])\n                else:\n                    instance_data[self.mapping_table[key]] = to_tensor(\n                        results[key])\n        data_sample.gt_instances = instance_data\n        data_sample.ignored_instances = ignore_instance_data\n\n        if 'proposals' in results:\n            proposals = InstanceData(\n                bboxes=to_tensor(results['proposals']),\n                scores=to_tensor(results['proposals_scores']))\n            data_sample.proposals = proposals\n\n        if 'gt_seg_map' in results:\n            gt_sem_seg_data = dict(\n                sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n            data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)\n\n        img_meta = {}\n        for key in self.meta_keys:\n            assert key in results, f'`{key}` is not found in `results`, ' \\\n                f'the valid keys are {list(results)}.'\n            img_meta[key] = results[key]\n\n        data_sample.set_metainfo(img_meta)\n        packed_results['data_samples'] = data_sample\n\n        return packed_results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(meta_keys={self.meta_keys})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass ToTensor:\n    \"\"\"Convert some results to :obj:`torch.Tensor` by given keys.\n\n    Args:\n        keys (Sequence[str]): Keys that need to be converted to Tensor.\n    \"\"\"\n\n    def __init__(self, keys):\n        self.keys = keys\n\n    def __call__(self, results):\n        \"\"\"Call function to convert data in results to :obj:`torch.Tensor`.\n\n        Args:\n            results (dict): Result dict contains the data to convert.\n\n        Returns:\n            dict: The result dict contains the data converted\n                to :obj:`torch.Tensor`.\n        \"\"\"\n        for key in self.keys:\n            results[key] = to_tensor(results[key])\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(keys={self.keys})'\n\n\n@TRANSFORMS.register_module()\nclass ImageToTensor:\n    \"\"\"Convert image to :obj:`torch.Tensor` by given keys.\n\n    The dimension order of input image is (H, W, C). The pipeline will convert\n    it to (C, H, W). 
If only 2 dimension (H, W) is given, the output would be\n    (1, H, W).\n\n    Args:\n        keys (Sequence[str]): Key of images to be converted to Tensor.\n    \"\"\"\n\n    def __init__(self, keys):\n        self.keys = keys\n\n    def __call__(self, results):\n        \"\"\"Call function to convert image in results to :obj:`torch.Tensor` and\n        transpose the channel order.\n\n        Args:\n            results (dict): Result dict contains the image data to convert.\n\n        Returns:\n            dict: The result dict contains the image converted\n                to :obj:`torch.Tensor` and permuted to (C, H, W) order.\n        \"\"\"\n        for key in self.keys:\n            img = results[key]\n            if len(img.shape) < 3:\n                img = np.expand_dims(img, -1)\n            results[key] = to_tensor(img).permute(2, 0, 1).contiguous()\n\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(keys={self.keys})'\n\n\n@TRANSFORMS.register_module()\nclass Transpose:\n    \"\"\"Transpose some results by given keys.\n\n    Args:\n        keys (Sequence[str]): Keys of results to be transposed.\n        order (Sequence[int]): Order of transpose.\n    \"\"\"\n\n    def __init__(self, keys, order):\n        self.keys = keys\n        self.order = order\n\n    def __call__(self, results):\n        \"\"\"Call function to transpose the channel order of data in results.\n\n        Args:\n            results (dict): Result dict contains the data to transpose.\n\n        Returns:\n            dict: The result dict contains the data transposed to \\\n                ``self.order``.\n        \"\"\"\n        for key in self.keys:\n            results[key] = results[key].transpose(self.order)\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n            f'(keys={self.keys}, order={self.order})'\n\n\n@TRANSFORMS.register_module()\nclass WrapFieldsToLists:\n    \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n    This class can be used as a last step of a test or validation\n    pipeline for single image evaluation or inference.\n\n    Example:\n        >>> test_pipeline = [\n        >>>    dict(type='LoadImageFromFile'),\n        >>>    dict(type='Normalize',\n                    mean=[123.675, 116.28, 103.53],\n                    std=[58.395, 57.12, 57.375],\n                    to_rgb=True),\n        >>>    dict(type='Pad', size_divisor=32),\n        >>>    dict(type='ImageToTensor', keys=['img']),\n        >>>    dict(type='Collect', keys=['img']),\n        >>>    dict(type='WrapFieldsToLists')\n        >>> ]\n    \"\"\"\n\n    def __call__(self, results):\n        \"\"\"Call function to wrap fields into lists.\n\n        Args:\n            results (dict): Result dict contains the data to wrap.\n\n        Returns:\n            dict: The result dict where value of ``self.keys`` are wrapped \\\n                into list.\n        \"\"\"\n\n        # Wrap dict fields into lists\n        for key, val in results.items():\n            results[key] = [val]\n        return results\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}()'\n"
  },
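A minimal sketch of `PackDetInputs` on a hand-built results dict. `meta_keys` is trimmed to the keys this toy dict actually carries, since every listed meta key must be present:

import numpy as np

from mmdet.datasets.transforms import PackDetInputs

results = dict(
    img=np.zeros((32, 32, 3), dtype=np.uint8),
    img_id=0,
    img_shape=(32, 32),
    gt_bboxes=np.array([[2., 2., 10., 12.]], dtype=np.float32),
    gt_bboxes_labels=np.array([1], dtype=np.int64),
    gt_ignore_flags=np.array([False]),
)
packed = PackDetInputs(meta_keys=('img_id', 'img_shape'))(results)
print(packed['inputs'].shape)                      # torch.Size([3, 32, 32])
print(packed['data_samples'].gt_instances.labels)  # tensor([1])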
  {
    "path": "mmdet/datasets/transforms/geometric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nfrom typing import Optional, Union\n\nimport cv2\nimport mmcv\nimport numpy as np\nfrom mmcv.transforms import BaseTransform\nfrom mmcv.transforms.utils import cache_randomness\n\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.structures.bbox import autocast_box_type\nfrom .augment_wrappers import _MAX_LEVEL, level_to_mag\n\n\n@TRANSFORMS.register_module()\nclass GeomTransform(BaseTransform):\n    \"\"\"Base class for geometric transformations. All geometric transformations\n    need to inherit from this base class. ``GeomTransform`` unifies the class\n    attributes and class functions of geometric transformations (ShearX,\n    ShearY, Rotate, TranslateX, and TranslateY), and records the homography\n    matrix.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for performing the geometric\n            transformation and should be in range [0, 1]. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum magnitude for geometric transformation.\n            Defaults to 0.0.\n        max_mag (float): The maximum magnitude for geometric transformation.\n            Defaults to 1.0.\n        reversal_prob (float): The probability that reverses the geometric\n            transformation magnitude. Should be in range [0,1].\n            Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. 
Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 1.0,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0 <= prob <= 1.0, f'The probability of the transformation ' \\\n                                 f'should be in range [0,1], got {prob}.'\n        assert level is None or isinstance(level, int), \\\n            f'The level should be None or type int, got {type(level)}.'\n        assert level is None or 0 <= level <= _MAX_LEVEL, \\\n            f'The level should be in range [0,{_MAX_LEVEL}], got {level}.'\n        assert isinstance(min_mag, float), \\\n            f'min_mag should be type float, got {type(min_mag)}.'\n        assert isinstance(max_mag, float), \\\n            f'max_mag should be type float, got {type(max_mag)}.'\n        assert min_mag <= max_mag, \\\n            f'min_mag should be smaller than max_mag, ' \\\n            f'got min_mag={min_mag} and max_mag={max_mag}'\n        assert isinstance(reversal_prob, float), \\\n            f'reversal_prob should be type float, got {type(reversal_prob)}.'\n        assert 0 <= reversal_prob <= 1.0, \\\n            f'The reversal probability of the transformation magnitude ' \\\n            f'should be in range [0,1], got {reversal_prob}.'\n        if isinstance(img_border_value, (float, int)):\n            img_border_value = tuple([float(img_border_value)] * 3)\n        elif isinstance(img_border_value, tuple):\n            assert len(img_border_value) == 3, \\\n                f'img_border_value as tuple must have 3 elements, ' \\\n                f'got {len(img_border_value)}.'\n            img_border_value = tuple([float(val) for val in img_border_value])\n        else:\n            raise ValueError(\n                'img_border_value must be float or tuple with 3 elements.')\n        assert np.all([0 <= val <= 255 for val in img_border_value]), 'all ' \\\n            'elements of img_border_value should be within range [0,255], ' 
\\\n            f'got {img_border_value}.'\n        self.prob = prob\n        self.level = level\n        self.min_mag = min_mag\n        self.max_mag = max_mag\n        self.reversal_prob = reversal_prob\n        self.img_border_value = img_border_value\n        self.mask_border_value = mask_border_value\n        self.seg_ignore_label = seg_ignore_label\n        self.interpolation = interpolation\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Transform the image.\"\"\"\n        pass\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Transform the masks.\"\"\"\n        pass\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Transform the segmentation map.\"\"\"\n        pass\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for the geometric transformation.\"\"\"\n        return np.eye(3, dtype=np.float32)\n\n    def _transform_bboxes(self, results: dict, mag: float) -> None:\n        \"\"\"Transform the bboxes.\"\"\"\n        results['gt_bboxes'].project_(self.homography_matrix)\n        results['gt_bboxes'].clip_(results['img_shape'])\n\n    def _record_homography_matrix(self, results: dict) -> None:\n        \"\"\"Record the homography matrix for the geometric transformation.\"\"\"\n        if results.get('homography_matrix', None) is None:\n            results['homography_matrix'] = self.homography_matrix\n        else:\n            results['homography_matrix'] = self.homography_matrix @ results[\n                'homography_matrix']\n\n    @cache_randomness\n    def _random_disable(self):\n        \"\"\"Randomly disable the transform.\"\"\"\n        return np.random.rand() > self.prob\n\n    @cache_randomness\n    def _get_mag(self):\n        \"\"\"Get the magnitude of the transform.\"\"\"\n        mag = level_to_mag(self.level, self.min_mag, self.max_mag)\n        return -mag if np.random.rand() > self.reversal_prob else mag\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function for images, bounding boxes, masks and semantic\n        segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Transformed results.\n        \"\"\"\n\n        if self._random_disable():\n            return results\n        mag = self._get_mag()\n        self.homography_matrix = self._get_homography_matrix(results, mag)\n        self._record_homography_matrix(results)\n        self._transform_img(results, mag)\n        if results.get('gt_bboxes', None) is not None:\n            self._transform_bboxes(results, mag)\n        if results.get('gt_masks', None) is not None:\n            self._transform_masks(results, mag)\n        if results.get('gt_seg_map', None) is not None:\n            self._transform_seg(results, mag)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(prob={self.prob}, '\n        repr_str += f'level={self.level}, '\n        repr_str += f'min_mag={self.min_mag}, '\n        repr_str += f'max_mag={self.max_mag}, '\n        repr_str += f'reversal_prob={self.reversal_prob}, '\n        repr_str += f'img_border_value={self.img_border_value}, '\n        repr_str += f'mask_border_value={self.mask_border_value}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '\n        repr_str += 
f'interpolation={self.interpolation})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass ShearX(GeomTransform):\n    \"\"\"Shear the images, bboxes, masks and segmentation map horizontally.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for performing Shear and should be in\n            range [0, 1]. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum angle for the horizontal shear.\n            Defaults to 0.0.\n        max_mag (float): The maximum angle for the horizontal shear.\n            Defaults to 30.0.\n        reversal_prob (float): The probability that reverses the horizontal\n            shear magnitude. Should be in range [0,1]. Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 30.0,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0. <= min_mag <= 90., \\\n            f'min_mag angle for ShearX should be ' \\\n            f'in range [0, 90], got {min_mag}.'\n        assert 0. 
<= max_mag <= 90., \\\n            f'max_mag angle for ShearX should be ' \\\n            f'in range [0, 90], got {max_mag}.'\n        super().__init__(\n            prob=prob,\n            level=level,\n            min_mag=min_mag,\n            max_mag=max_mag,\n            reversal_prob=reversal_prob,\n            img_border_value=img_border_value,\n            mask_border_value=mask_border_value,\n            seg_ignore_label=seg_ignore_label,\n            interpolation=interpolation)\n\n    @cache_randomness\n    def _get_mag(self):\n        \"\"\"Get the magnitude of the transform.\"\"\"\n        mag = level_to_mag(self.level, self.min_mag, self.max_mag)\n        mag = np.tan(mag * np.pi / 180)\n        return -mag if np.random.rand() > self.reversal_prob else mag\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for ShearX.\"\"\"\n        return np.array([[1, mag, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the image horizontally.\"\"\"\n        results['img'] = mmcv.imshear(\n            results['img'],\n            mag,\n            direction='horizontal',\n            border_value=self.img_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the masks horizontally.\"\"\"\n        results['gt_masks'] = results['gt_masks'].shear(\n            results['img_shape'],\n            mag,\n            direction='horizontal',\n            border_value=self.mask_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the segmentation map horizontally.\"\"\"\n        results['gt_seg_map'] = mmcv.imshear(\n            results['gt_seg_map'],\n            mag,\n            direction='horizontal',\n            border_value=self.seg_ignore_label,\n            interpolation='nearest')\n\n\n@TRANSFORMS.register_module()\nclass ShearY(GeomTransform):\n    \"\"\"Shear the images, bboxes, masks and segmentation map vertically.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for performing ShearY and should be in\n            range [0, 1]. Defaults to 1.0.\n        level (int, optional): The level should be in range [0,_MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum angle for the vertical shear.\n            Defaults to 0.0.\n        max_mag (float): The maximum angle for the vertical shear.\n            Defaults to 30.0.\n        reversal_prob (float): The probability that reverses the vertical\n            shear magnitude. Should be in range [0,1]. Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. 
Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 30.,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0. <= min_mag <= 90., \\\n            f'min_mag angle for ShearY should be ' \\\n            f'in range [0, 90], got {min_mag}.'\n        assert 0. <= max_mag <= 90., \\\n            f'max_mag angle for ShearY should be ' \\\n            f'in range [0, 90], got {max_mag}.'\n        super().__init__(\n            prob=prob,\n            level=level,\n            min_mag=min_mag,\n            max_mag=max_mag,\n            reversal_prob=reversal_prob,\n            img_border_value=img_border_value,\n            mask_border_value=mask_border_value,\n            seg_ignore_label=seg_ignore_label,\n            interpolation=interpolation)\n\n    @cache_randomness\n    def _get_mag(self):\n        \"\"\"Get the magnitude of the transform.\"\"\"\n        mag = level_to_mag(self.level, self.min_mag, self.max_mag)\n        mag = np.tan(mag * np.pi / 180)\n        return -mag if np.random.rand() > self.reversal_prob else mag\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for ShearY.\"\"\"\n        return np.array([[1, 0, 0], [mag, 1, 0], [0, 0, 1]], dtype=np.float32)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the image vertically.\"\"\"\n        results['img'] = mmcv.imshear(\n            results['img'],\n            mag,\n            direction='vertical',\n            border_value=self.img_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the masks vertically.\"\"\"\n        results['gt_masks'] = results['gt_masks'].shear(\n            results['img_shape'],\n            mag,\n            direction='vertical',\n            border_value=self.mask_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Shear the segmentation map vertically.\"\"\"\n        results['gt_seg_map'] = mmcv.imshear(\n            results['gt_seg_map'],\n            mag,\n            direction='vertical',\n            border_value=self.seg_ignore_label,\n            interpolation='nearest')\n\n\n@TRANSFORMS.register_module()\nclass Rotate(GeomTransform):\n    \"\"\"Rotate the images, bboxes, masks and segmentation map.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - 
gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for performing the transformation and\n            should be in range [0, 1]. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum angle for rotation.\n            Defaults to 0.0.\n        max_mag (float): The maximum angle for rotation.\n            Defaults to 30.0.\n        reversal_prob (float): The probability that reverses the rotation\n            magnitude. Should be in range [0,1]. Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 30.0,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0. <= min_mag <= 180., \\\n            f'min_mag for Rotate should be in range [0,180], got {min_mag}.'\n        assert 0. 
<= max_mag <= 180., \\\n            f'max_mag for Rotate should be in range [0,180], got {max_mag}.'\n        super().__init__(\n            prob=prob,\n            level=level,\n            min_mag=min_mag,\n            max_mag=max_mag,\n            reversal_prob=reversal_prob,\n            img_border_value=img_border_value,\n            mask_border_value=mask_border_value,\n            seg_ignore_label=seg_ignore_label,\n            interpolation=interpolation)\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for Rotate.\"\"\"\n        img_shape = results['img_shape']\n        center = ((img_shape[1] - 1) * 0.5, (img_shape[0] - 1) * 0.5)\n        cv2_rotation_matrix = cv2.getRotationMatrix2D(center, -mag, 1.0)\n        return np.concatenate(\n            [cv2_rotation_matrix,\n             np.array([0, 0, 1]).reshape((1, 3))]).astype(np.float32)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Rotate the image.\"\"\"\n        results['img'] = mmcv.imrotate(\n            results['img'],\n            mag,\n            border_value=self.img_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Rotate the masks.\"\"\"\n        results['gt_masks'] = results['gt_masks'].rotate(\n            results['img_shape'],\n            mag,\n            border_value=self.mask_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Rotate the segmentation map.\"\"\"\n        results['gt_seg_map'] = mmcv.imrotate(\n            results['gt_seg_map'],\n            mag,\n            border_value=self.seg_ignore_label,\n            interpolation='nearest')\n\n\n@TRANSFORMS.register_module()\nclass TranslateX(GeomTransform):\n    \"\"\"Translate the images, bboxes, masks and segmentation map horizontally.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for perform transformation and\n            should be in range 0 to 1. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum pixel's offset ratio for horizontal\n            translation. Defaults to 0.0.\n        max_mag (float): The maximum pixel's offset ratio for horizontal\n            translation. Defaults to 0.1.\n        reversal_prob (float): The probability that reverses the horizontal\n            translation magnitude. Should be in range [0,1]. Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. 
Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 0.1,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0. <= min_mag <= 1., \\\n            f'min_mag ratio for TranslateX should be ' \\\n            f'in range [0, 1], got {min_mag}.'\n        assert 0. <= max_mag <= 1., \\\n            f'max_mag ratio for TranslateX should be ' \\\n            f'in range [0, 1], got {max_mag}.'\n        super().__init__(\n            prob=prob,\n            level=level,\n            min_mag=min_mag,\n            max_mag=max_mag,\n            reversal_prob=reversal_prob,\n            img_border_value=img_border_value,\n            mask_border_value=mask_border_value,\n            seg_ignore_label=seg_ignore_label,\n            interpolation=interpolation)\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for TranslateX.\"\"\"\n        mag = int(results['img_shape'][1] * mag)\n        return np.array([[1, 0, mag], [0, 1, 0], [0, 0, 1]], dtype=np.float32)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Translate the image horizontally.\"\"\"\n        mag = int(results['img_shape'][1] * mag)\n        results['img'] = mmcv.imtranslate(\n            results['img'],\n            mag,\n            direction='horizontal',\n            border_value=self.img_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Translate the masks horizontally.\"\"\"\n        mag = int(results['img_shape'][1] * mag)\n        results['gt_masks'] = results['gt_masks'].translate(\n            results['img_shape'],\n            mag,\n            direction='horizontal',\n            border_value=self.mask_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Translate the segmentation map horizontally.\"\"\"\n        mag = int(results['img_shape'][1] * mag)\n        results['gt_seg_map'] = mmcv.imtranslate(\n            results['gt_seg_map'],\n            mag,\n            direction='horizontal',\n            border_value=self.seg_ignore_label,\n            interpolation='nearest')\n\n\n@TRANSFORMS.register_module()\nclass TranslateY(GeomTransform):\n    \"\"\"Translate the images, bboxes, masks and segmentation map vertically.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - 
gt_seg_map\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        prob (float): The probability for perform transformation and\n            should be in range 0 to 1. Defaults to 1.0.\n        level (int, optional): The level should be in range [0, _MAX_LEVEL].\n            If level is None, it will generate from [0, _MAX_LEVEL] randomly.\n            Defaults to None.\n        min_mag (float): The minimum pixel's offset ratio for vertical\n            translation. Defaults to 0.0.\n        max_mag (float): The maximum pixel's offset ratio for vertical\n            translation. Defaults to 0.1.\n        reversal_prob (float): The probability that reverses the vertical\n            translation magnitude. Should be in range [0,1]. Defaults to 0.5.\n        img_border_value (int | float | tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 1.0,\n                 level: Optional[int] = None,\n                 min_mag: float = 0.0,\n                 max_mag: float = 0.1,\n                 reversal_prob: float = 0.5,\n                 img_border_value: Union[int, float, tuple] = 128,\n                 mask_border_value: int = 0,\n                 seg_ignore_label: int = 255,\n                 interpolation: str = 'bilinear') -> None:\n        assert 0. <= min_mag <= 1., \\\n            f'min_mag ratio for TranslateY should be ' \\\n            f'in range [0,1], got {min_mag}.'\n        assert 0. 
<= max_mag <= 1., \\\n            f'max_mag ratio for TranslateY should be ' \\\n            f'in range [0,1], got {max_mag}.'\n        super().__init__(\n            prob=prob,\n            level=level,\n            min_mag=min_mag,\n            max_mag=max_mag,\n            reversal_prob=reversal_prob,\n            img_border_value=img_border_value,\n            mask_border_value=mask_border_value,\n            seg_ignore_label=seg_ignore_label,\n            interpolation=interpolation)\n\n    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:\n        \"\"\"Get the homography matrix for TranslateY.\"\"\"\n        mag = int(results['img_shape'][0] * mag)\n        return np.array([[1, 0, 0], [0, 1, mag], [0, 0, 1]], dtype=np.float32)\n\n    def _transform_img(self, results: dict, mag: float) -> None:\n        \"\"\"Translate the image vertically.\"\"\"\n        mag = int(results['img_shape'][0] * mag)\n        results['img'] = mmcv.imtranslate(\n            results['img'],\n            mag,\n            direction='vertical',\n            border_value=self.img_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_masks(self, results: dict, mag: float) -> None:\n        \"\"\"Translate masks vertically.\"\"\"\n        mag = int(results['img_shape'][0] * mag)\n        results['gt_masks'] = results['gt_masks'].translate(\n            results['img_shape'],\n            mag,\n            direction='vertical',\n            border_value=self.mask_border_value,\n            interpolation=self.interpolation)\n\n    def _transform_seg(self, results: dict, mag: float) -> None:\n        \"\"\"Translate segmentation map vertically.\"\"\"\n        mag = int(results['img_shape'][0] * mag)\n        results['gt_seg_map'] = mmcv.imtranslate(\n            results['gt_seg_map'],\n            mag,\n            direction='vertical',\n            border_value=self.seg_ignore_label,\n            interpolation='nearest')\n"
  },
  {
    "path": "mmdet/datasets/transforms/instaboost.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport numpy as np\nfrom mmcv.transforms import BaseTransform\n\nfrom mmdet.registry import TRANSFORMS\n\n\n@TRANSFORMS.register_module()\nclass InstaBoost(BaseTransform):\n    r\"\"\"Data augmentation method in `InstaBoost: Boosting Instance\n    Segmentation Via Probability Map Guided Copy-Pasting\n    <https://arxiv.org/abs/1908.07801>`_.\n\n    Refer to https://github.com/GothicAi/Instaboost for implementation details.\n\n\n    Required Keys:\n\n    - img (np.uint8)\n    - instances\n\n    Modified Keys:\n\n    - img (np.uint8)\n    - instances\n\n    Args:\n        action_candidate (tuple): Action candidates. \"normal\", \"horizontal\", \\\n            \"vertical\", \"skip\" are supported. Defaults to ('normal', \\\n            'horizontal', 'skip').\n        action_prob (tuple): Corresponding action probabilities. Should be \\\n            the same length as action_candidate. Defaults to (1, 0, 0).\n        scale (tuple): (min scale, max scale). Defaults to (0.8, 1.2).\n        dx (int): The maximum x-axis shift will be (instance width) / dx.\n            Defaults to 15.\n        dy (int): The maximum y-axis shift will be (instance height) / dy.\n            Defaults to 15.\n        theta (tuple): (min rotation degree, max rotation degree). \\\n            Defaults to (-1, 1).\n        color_prob (float): Probability of images for color augmentation.\n            Defaults to 0.5.\n        hflag (bool): Whether to use heatmap guided. Defaults to False.\n        aug_ratio (float): Probability of applying this transformation. \\\n            Defaults to 0.5.\n    \"\"\"\n\n    def __init__(self,\n                 action_candidate: tuple = ('normal', 'horizontal', 'skip'),\n                 action_prob: tuple = (1, 0, 0),\n                 scale: tuple = (0.8, 1.2),\n                 dx: int = 15,\n                 dy: int = 15,\n                 theta: tuple = (-1, 1),\n                 color_prob: float = 0.5,\n                 hflag: bool = False,\n                 aug_ratio: float = 0.5) -> None:\n\n        import matplotlib\n        import matplotlib.pyplot as plt\n        default_backend = plt.get_backend()\n\n        try:\n            import instaboostfast as instaboost\n        except ImportError:\n            raise ImportError(\n                'Please run \"pip install instaboostfast\" '\n                'to install instaboostfast first for instaboost augmentation.')\n\n        # instaboost will modify the default backend\n        # and cause visualization to fail.\n        matplotlib.use(default_backend)\n\n        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,\n                                               scale, dx, dy, theta,\n                                               color_prob, hflag)\n        self.aug_ratio = aug_ratio\n\n    def _load_anns(self, results: dict) -> Tuple[list, list]:\n        \"\"\"Convert raw anns to instaboost expected input format.\"\"\"\n        anns = []\n        ignore_anns = []\n        for instance in results['instances']:\n            label = instance['bbox_label']\n            bbox = instance['bbox']\n            mask = instance['mask']\n            x1, y1, x2, y2 = bbox\n            # assert (x2 - x1) >= 1 and (y2 - y1) >= 1\n            bbox = [x1, y1, x2 - x1, y2 - y1]\n\n            if instance['ignore_flag'] == 0:\n                anns.append({\n                    'category_id': label,\n                    
'segmentation': mask,\n                    'bbox': bbox\n                })\n            else:\n                # Ignored instances are excluded from data augmentation\n                ignore_anns.append(instance)\n        return anns, ignore_anns\n\n    def _parse_anns(self, results: dict, anns: list, ignore_anns: list,\n                    img: np.ndarray) -> dict:\n        \"\"\"Restore the result of instaboost processing to the original anns\n        format.\"\"\"\n        instances = []\n        for ann in anns:\n            x1, y1, w, h = ann['bbox']\n            # TODO: a more fundamental bug needs to be fixed in instaboost\n            if w <= 0 or h <= 0:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n            instances.append(\n                dict(\n                    bbox=bbox,\n                    bbox_label=ann['category_id'],\n                    mask=ann['segmentation'],\n                    ignore_flag=0))\n\n        instances.extend(ignore_anns)\n        results['img'] = img\n        results['instances'] = instances\n        return results\n\n    def transform(self, results) -> dict:\n        \"\"\"The transform function.\"\"\"\n        img = results['img']\n        ori_type = img.dtype\n        if 'instances' not in results or len(results['instances']) == 0:\n            return results\n\n        anns, ignore_anns = self._load_anns(results)\n        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):\n            try:\n                import instaboostfast as instaboost\n            except ImportError:\n                raise ImportError('Please run \"pip install instaboostfast\" '\n                                  'to install instaboostfast first.')\n            anns, img = instaboost.get_new_data(\n                anns, img.astype(np.uint8), self.cfg, background=None)\n\n        results = self._parse_anns(results, anns, ignore_anns,\n                                   img.astype(ori_type))\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(aug_ratio={self.aug_ratio})'\n        return repr_str\n"
  },
  {
    "path": "mmdet/datasets/transforms/loading.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nimport torch\nfrom mmcv.transforms import BaseTransform\nfrom mmcv.transforms import LoadAnnotations as MMCV_LoadAnnotations\nfrom mmcv.transforms import LoadImageFromFile\nfrom mmengine.fileio import FileClient\nfrom mmengine.structures import BaseDataElement\n\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.structures.bbox import get_box_type\nfrom mmdet.structures.bbox.box_type import autocast_box_type\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\n\n@TRANSFORMS.register_module()\nclass LoadImageFromNDArray(LoadImageFromFile):\n    \"\"\"Load an image from ``results['img']``.\n\n    Similar with :obj:`LoadImageFromFile`, but the image has been loaded as\n    :obj:`np.ndarray` in ``results['img']``. Can be used when loading image\n    from webcam.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n    - img_path\n    - img_shape\n    - ori_shape\n\n    Args:\n        to_float32 (bool): Whether to convert the loaded image to a float32\n            numpy array. If set to False, the loaded image is an uint8 array.\n            Defaults to False.\n    \"\"\"\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to add image meta information.\n\n        Args:\n            results (dict): Result dict with Webcam read image in\n                ``results['img']``.\n\n        Returns:\n            dict: The dict contains loaded image and meta information.\n        \"\"\"\n\n        img = results['img']\n        if self.to_float32:\n            img = img.astype(np.float32)\n\n        results['img_path'] = None\n        results['img'] = img\n        results['img_shape'] = img.shape[:2]\n        results['ori_shape'] = img.shape[:2]\n        return results\n\n\n@TRANSFORMS.register_module()\nclass LoadMultiChannelImageFromFiles(BaseTransform):\n    \"\"\"Load multi-channel images from a list of separate channel files.\n\n    Required Keys:\n\n    - img_path\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - ori_shape\n\n    Args:\n        to_float32 (bool): Whether to convert the loaded image to a float32\n            numpy array. If set to False, the loaded image is an uint8 array.\n            Defaults to False.\n        color_type (str): The flag argument for :func:``mmcv.imfrombytes``.\n            Defaults to 'unchanged'.\n        imdecode_backend (str): The image decoding backend type. 
The backend\n            argument for :func:``mmcv.imfrombytes``.\n            See :func:``mmcv.imfrombytes`` for details.\n            Defaults to 'cv2'.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(\n        self,\n        to_float32: bool = False,\n        color_type: str = 'unchanged',\n        imdecode_backend: str = 'cv2',\n        file_client_args: dict = dict(backend='disk')\n    ) -> None:\n        self.to_float32 = to_float32\n        self.color_type = color_type\n        self.imdecode_backend = imdecode_backend\n        self.file_client_args = file_client_args.copy()\n        self.file_client = FileClient(**self.file_client_args)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform functions to load multiple images and get images meta\n        information.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded images and meta information.\n        \"\"\"\n\n        assert isinstance(results['img_path'], list)\n        img = []\n        for name in results['img_path']:\n            img_bytes = self.file_client.get(name)\n            img.append(\n                mmcv.imfrombytes(\n                    img_bytes,\n                    flag=self.color_type,\n                    backend=self.imdecode_backend))\n        img = np.stack(img, axis=-1)\n        if self.to_float32:\n            img = img.astype(np.float32)\n\n        results['img'] = img\n        results['img_shape'] = img.shape[:2]\n        results['ori_shape'] = img.shape[:2]\n        return results\n\n    def __repr__(self):\n        repr_str = (f'{self.__class__.__name__}('\n                    f'to_float32={self.to_float32}, '\n                    f\"color_type='{self.color_type}', \"\n                    f\"imdecode_backend='{self.imdecode_backend}', \"\n                    f'file_client_args={self.file_client_args})')\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass LoadAnnotations(MMCV_LoadAnnotations):\n    \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n    by dataset.\n\n    The annotation format is as the following:\n\n    .. code-block:: python\n\n        {\n            'instances':\n            [\n                {\n                # List of 4 numbers representing the bounding box of the\n                # instance, in (x1, y1, x2, y2) order.\n                'bbox': [x1, y1, x2, y2],\n\n                # Label of image classification.\n                'bbox_label': 1,\n\n                # Used in instance/panoptic segmentation. The segmentation mask\n                # of the instance or the information of segments.\n                # 1. If list[list[float]], it represents a list of polygons,\n                # one for each connected component of the object. Each\n                # list[float] is one simple polygon in the format of\n                # [x1, y1, ..., xn, yn] (n≥3). The Xs and Ys are absolute\n                # coordinates in unit of pixels.\n                # 2. If dict, it represents the per-pixel segmentation mask in\n                # COCO’s compressed RLE format. The dict should have keys\n                # “size” and “counts”.  
Can be loaded by pycocotools\n                'mask': list[list[float]] or dict,\n\n                }\n            ]\n            # Filename of semantic or panoptic segmentation ground truth file.\n            'seg_map_path': 'a/b/c'\n        }\n\n    After this module, the annotation has been changed to the format below:\n\n    .. code-block:: python\n\n        {\n            # In (x1, y1, x2, y2) order, float type. N is the number of bboxes\n            # in an image\n            'gt_bboxes': BaseBoxes(N, 4)\n             # In int type.\n            'gt_bboxes_labels': np.ndarray(N, )\n             # In built-in class\n            'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n             # In uint8 type.\n            'gt_seg_map': np.ndarray (H, W)\n             # in (x, y, v) order, float type.\n        }\n\n    Required Keys:\n\n    - height\n    - width\n    - instances\n\n      - bbox (optional)\n      - bbox_label\n      - mask (optional)\n      - ignore_flag\n\n    - seg_map_path (optional)\n\n    Added Keys:\n\n    - gt_bboxes (BaseBoxes[torch.float32])\n    - gt_bboxes_labels (np.int64)\n    - gt_masks (BitmapMasks | PolygonMasks)\n    - gt_seg_map (np.uint8)\n    - gt_ignore_flags (bool)\n\n    Args:\n        with_bbox (bool): Whether to parse and load the bbox annotation.\n            Defaults to True.\n        with_label (bool): Whether to parse and load the label annotation.\n            Defaults to True.\n        with_mask (bool): Whether to parse and load the mask annotation.\n             Default: False.\n        with_seg (bool): Whether to parse and load the semantic segmentation\n            annotation. Defaults to False.\n        poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n        box_type (str): The box type used to wrap the bboxes. If ``box_type``\n            is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n        imdecode_backend (str): The image decoding backend type. 
The backend\n            argument for :func:``mmcv.imfrombytes``.\n            See :fun:``mmcv.imfrombytes`` for details.\n            Defaults to 'cv2'.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:``mmengine.fileio.FileClient`` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 with_mask: bool = False,\n                 poly2mask: bool = True,\n                 box_type: str = 'hbox',\n                 **kwargs) -> None:\n        super(LoadAnnotations, self).__init__(**kwargs)\n        self.with_mask = with_mask\n        self.poly2mask = poly2mask\n        self.box_type = box_type\n\n    def _load_bboxes(self, results: dict) -> None:\n        \"\"\"Private function to load bounding box annotations.\n\n        Args:\n            results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n        Returns:\n            dict: The dict contains loaded bounding box annotations.\n        \"\"\"\n        gt_bboxes = []\n        gt_ignore_flags = []\n        for instance in results.get('instances', []):\n            gt_bboxes.append(instance['bbox'])\n            gt_ignore_flags.append(instance['ignore_flag'])\n        if self.box_type is None:\n            results['gt_bboxes'] = np.array(\n                gt_bboxes, dtype=np.float32).reshape((-1, 4))\n        else:\n            _, box_type_cls = get_box_type(self.box_type)\n            results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n        results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n    def _load_labels(self, results: dict) -> None:\n        \"\"\"Private function to load label annotations.\n\n        Args:\n            results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n        Returns:\n            dict: The dict contains loaded label annotations.\n        \"\"\"\n        gt_bboxes_labels = []\n        for instance in results.get('instances', []):\n            gt_bboxes_labels.append(instance['bbox_label'])\n        # TODO: Inconsistent with mmcv, consider how to deal with it later.\n        results['gt_bboxes_labels'] = np.array(\n            gt_bboxes_labels, dtype=np.int64)\n\n    def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n                   img_w: int) -> np.ndarray:\n        \"\"\"Private function to convert masks represented with polygon to\n        bitmaps.\n\n        Args:\n            mask_ann (list | dict): Polygon mask annotation input.\n            img_h (int): The height of output mask.\n            img_w (int): The width of output mask.\n\n        Returns:\n            np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n        \"\"\"\n\n        if isinstance(mask_ann, list):\n            # polygon -- a single object might consist of multiple parts\n            # we merge all parts into one mask rle code\n            rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n            rle = maskUtils.merge(rles)\n        elif isinstance(mask_ann['counts'], list):\n            # uncompressed RLE\n            rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n        else:\n            # rle\n            rle = mask_ann\n        mask = maskUtils.decode(rle)\n        return mask\n\n    def _process_masks(self, results: dict) -> list:\n        \"\"\"Process gt_masks and filter invalid polygons.\n\n        Args:\n            results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n        Returns:\n            
list: Processed gt_masks.\n        \"\"\"\n        gt_masks = []\n        gt_ignore_flags = []\n        for instance in results.get('instances', []):\n            gt_mask = instance['mask']\n            # If the annotation of segmentation mask is invalid,\n            # ignore the whole instance.\n            if isinstance(gt_mask, list):\n                gt_mask = [\n                    np.array(polygon) for polygon in gt_mask\n                    if len(polygon) % 2 == 0 and len(polygon) >= 6\n                ]\n                if len(gt_mask) == 0:\n                    # ignore this instance and set gt_mask to a fake mask\n                    instance['ignore_flag'] = 1\n                    gt_mask = [np.zeros(6)]\n            elif not self.poly2mask:\n                # `PolygonMasks` requires a polygon of format List[np.array],\n                # other formats are invalid.\n                instance['ignore_flag'] = 1\n                gt_mask = [np.zeros(6)]\n            elif isinstance(gt_mask, dict) and \\\n                    not (gt_mask.get('counts') is not None and\n                         gt_mask.get('size') is not None and\n                         isinstance(gt_mask['counts'], (list, str))):\n                # if gt_mask is a dict, it should include `counts` and `size`,\n                # so that `BitmapMasks` can uncompress the RLE\n                instance['ignore_flag'] = 1\n                gt_mask = [np.zeros(6)]\n            gt_masks.append(gt_mask)\n            # re-process gt_ignore_flags\n            gt_ignore_flags.append(instance['ignore_flag'])\n        results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n        return gt_masks\n\n    def _load_masks(self, results: dict) -> None:\n        \"\"\"Private function to load mask annotations.\n\n        Args:\n            results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n        \"\"\"\n        h, w = results['ori_shape']\n        gt_masks = self._process_masks(results)\n        if self.poly2mask:\n            gt_masks = BitmapMasks(\n                [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n        else:\n            # fake polygon masks will be ignored in `PackDetInputs`\n            gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n        results['gt_masks'] = gt_masks\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Function to load multiple types of annotations.\n\n        Args:\n            results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n        Returns:\n            dict: The dict contains loaded bounding box, label and\n            semantic segmentation.\n        \"\"\"\n\n        if self.with_bbox:\n            self._load_bboxes(results)\n        if self.with_label:\n            self._load_labels(results)\n        if self.with_mask:\n            self._load_masks(results)\n        if self.with_seg:\n            self._load_seg_map(results)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(with_bbox={self.with_bbox}, '\n        repr_str += f'with_label={self.with_label}, '\n        repr_str += f'with_mask={self.with_mask}, '\n        repr_str += f'with_seg={self.with_seg}, '\n        repr_str += f'poly2mask={self.poly2mask}, '\n        repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n        repr_str += f'file_client_args={self.file_client_args})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass 
LoadPanopticAnnotations(LoadAnnotations):\n    \"\"\"Load multiple types of panoptic annotations.\n\n    The annotation format is as the following:\n\n    .. code-block:: python\n\n        {\n            'instances':\n            [\n                {\n                # List of 4 numbers representing the bounding box of the\n                # instance, in (x1, y1, x2, y2) order.\n                'bbox': [x1, y1, x2, y2],\n\n                # Label of image classification.\n                'bbox_label': 1,\n                },\n                ...\n            ]\n            'segments_info':\n            [\n                {\n                # id = cls_id + instance_id * INSTANCE_OFFSET\n                'id': int,\n\n                # Contiguous category id defined in dataset.\n                'category': int\n\n                # Thing flag.\n                'is_thing': bool\n                },\n                ...\n            ]\n\n            # Filename of semantic or panoptic segmentation ground truth file.\n            'seg_map_path': 'a/b/c'\n        }\n\n    After this module, the annotation has been changed to the format below:\n\n    .. code-block:: python\n\n        {\n            # In (x1, y1, x2, y2) order, float type. N is the number of bboxes\n            # in an image\n            'gt_bboxes': BaseBoxes(N, 4)\n             # In int type.\n            'gt_bboxes_labels': np.ndarray(N, )\n             # In built-in class\n            'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n             # In uint8 type.\n            'gt_seg_map': np.ndarray (H, W)\n             # in (x, y, v) order, float type.\n        }\n\n    Required Keys:\n\n    - height\n    - width\n    - instances\n      - bbox\n      - bbox_label\n      - ignore_flag\n    - segments_info\n      - id\n      - category\n      - is_thing\n    - seg_map_path\n\n    Added Keys:\n\n    - gt_bboxes (BaseBoxes[torch.float32])\n    - gt_bboxes_labels (np.int64)\n    - gt_masks (BitmapMasks | PolygonMasks)\n    - gt_seg_map (np.uint8)\n    - gt_ignore_flags (bool)\n\n    Args:\n        with_bbox (bool): Whether to parse and load the bbox annotation.\n            Defaults to True.\n        with_label (bool): Whether to parse and load the label annotation.\n            Defaults to True.\n        with_mask (bool): Whether to parse and load the mask annotation.\n             Defaults to True.\n        with_seg (bool): Whether to parse and load the semantic segmentation\n            annotation. Defaults to False.\n        box_type (str): The box mode used to wrap the bboxes.\n        imdecode_backend (str): The image decoding backend type. 
The backend\n            argument for :func:``mmcv.imfrombytes``.\n            See :fun:``mmcv.imfrombytes`` for details.\n            Defaults to 'cv2'.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:``mmengine.fileio.FileClient`` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(\n        self,\n        with_bbox: bool = True,\n        with_label: bool = True,\n        with_mask: bool = True,\n        with_seg: bool = True,\n        box_type: str = 'hbox',\n        imdecode_backend: str = 'cv2',\n        file_client_args: dict = dict(backend='disk')\n    ) -> None:\n        try:\n            from panopticapi import utils\n        except ImportError:\n            raise ImportError(\n                'panopticapi is not installed, please install it by: '\n                'pip install git+https://github.com/cocodataset/'\n                'panopticapi.git.')\n        self.rgb2id = utils.rgb2id\n\n        self.file_client = FileClient(**file_client_args)\n        super(LoadPanopticAnnotations, self).__init__(\n            with_bbox=with_bbox,\n            with_label=with_label,\n            with_mask=with_mask,\n            with_seg=with_seg,\n            with_keypoints=False,\n            box_type=box_type,\n            imdecode_backend=imdecode_backend,\n            file_client_args=file_client_args)\n\n    def _load_masks_and_semantic_segs(self, results: dict) -> None:\n        \"\"\"Private function to load mask and semantic segmentation annotations.\n\n        In gt_semantic_seg, the foreground label is from ``0`` to\n        ``num_things - 1``, the background label is from ``num_things`` to\n        ``num_things + num_stuff - 1``, 255 means the ignored label (``VOID``).\n\n        Args:\n            results (dict): Result dict from :obj:``mmdet.CustomDataset``.\n        \"\"\"\n        # seg_map_path is None, when inference on the dataset without gts.\n        if results.get('seg_map_path', None) is None:\n            return\n\n        img_bytes = self.file_client.get(results['seg_map_path'])\n        pan_png = mmcv.imfrombytes(\n            img_bytes, flag='color', channel_order='rgb').squeeze()\n        pan_png = self.rgb2id(pan_png)\n\n        gt_masks = []\n        gt_seg = np.zeros_like(pan_png) + 255  # 255 as ignore\n\n        for segment_info in results['segments_info']:\n            mask = (pan_png == segment_info['id'])\n            gt_seg = np.where(mask, segment_info['category'], gt_seg)\n\n            # The legal thing masks\n            if segment_info.get('is_thing'):\n                gt_masks.append(mask.astype(np.uint8))\n\n        if self.with_mask:\n            h, w = results['ori_shape']\n            gt_masks = BitmapMasks(gt_masks, h, w)\n            results['gt_masks'] = gt_masks\n\n        if self.with_seg:\n            results['gt_seg_map'] = gt_seg\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Function to load multiple types panoptic annotations.\n\n        Args:\n            results (dict): Result dict from :obj:``mmdet.CustomDataset``.\n\n        Returns:\n            dict: The dict contains loaded bounding box, label, mask and\n                semantic segmentation annotations.\n        \"\"\"\n\n        if self.with_bbox:\n            self._load_bboxes(results)\n        if self.with_label:\n            self._load_labels(results)\n        if self.with_mask or self.with_seg:\n            # The tasks completed by '_load_masks' and 
'_load_semantic_segs'\n            # in LoadAnnotations are merged to one function.\n            self._load_masks_and_semantic_segs(results)\n\n        return results\n\n\n@TRANSFORMS.register_module()\nclass LoadProposals(BaseTransform):\n    \"\"\"Load proposal pipeline.\n\n    Required Keys:\n\n    - proposals\n\n    Modified Keys:\n\n    - proposals\n\n    Args:\n        num_max_proposals (int, optional): Maximum number of proposals to load.\n            If not specified, all proposals will be loaded.\n    \"\"\"\n\n    def __init__(self, num_max_proposals: Optional[int] = None) -> None:\n        self.num_max_proposals = num_max_proposals\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to load proposals from file.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded proposal annotations.\n        \"\"\"\n\n        proposals = results['proposals']\n        # the type of proposals should be `dict` or `InstanceData`\n        assert isinstance(proposals, dict) \\\n               or isinstance(proposals, BaseDataElement)\n        bboxes = proposals['bboxes'].astype(np.float32)\n        assert bboxes.shape[1] == 4, \\\n            f'Proposals should have shapes (n, 4), but found {bboxes.shape}'\n\n        if 'scores' in proposals:\n            scores = proposals['scores'].astype(np.float32)\n            assert bboxes.shape[0] == scores.shape[0]\n        else:\n            scores = np.zeros(bboxes.shape[0], dtype=np.float32)\n\n        if self.num_max_proposals is not None:\n            # proposals should sort by scores during dumping the proposals\n            bboxes = bboxes[:self.num_max_proposals]\n            scores = scores[:self.num_max_proposals]\n\n        if len(bboxes) == 0:\n            bboxes = np.zeros((0, 4), dtype=np.float32)\n            scores = np.zeros(0, dtype=np.float32)\n\n        results['proposals'] = bboxes\n        results['proposals_scores'] = scores\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n               f'(num_max_proposals={self.num_max_proposals})'\n\n\n@TRANSFORMS.register_module()\nclass FilterAnnotations(BaseTransform):\n    \"\"\"Filter invalid annotations.\n\n    Required Keys:\n\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_ignore_flags (bool) (optional)\n\n    Modified Keys:\n\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_masks (optional)\n    - gt_ignore_flags (optional)\n\n    Args:\n        min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n            boxes. Default: (1., 1.)\n        min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n            Default: 1\n        by_box (bool): Filter instances with bounding boxes not meeting the\n            min_gt_bbox_wh threshold. Default: True\n        by_mask (bool): Filter instances with masks not meeting\n            min_gt_mask_area threshold. Default: False\n        keep_empty (bool): Whether to return None when it\n            becomes an empty bbox after filtering. 
Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n                 min_gt_mask_area: int = 1,\n                 by_box: bool = True,\n                 by_mask: bool = False,\n                 keep_empty: bool = True) -> None:\n        # TODO: add more filter options\n        assert by_box or by_mask\n        self.min_gt_bbox_wh = min_gt_bbox_wh\n        self.min_gt_mask_area = min_gt_mask_area\n        self.by_box = by_box\n        self.by_mask = by_mask\n        self.keep_empty = keep_empty\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> Union[dict, None]:\n        \"\"\"Transform function to filter annotations.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        assert 'gt_bboxes' in results\n        gt_bboxes = results['gt_bboxes']\n        if gt_bboxes.shape[0] == 0:\n            return results\n\n        tests = []\n        if self.by_box:\n            tests.append(\n                ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n                 (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n        if self.by_mask:\n            assert 'gt_masks' in results\n            gt_masks = results['gt_masks']\n            tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n        keep = tests[0]\n        for t in tests[1:]:\n            keep = keep & t\n\n        if not keep.any():\n            if self.keep_empty:\n                return None\n\n        keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n        for key in keys:\n            if key in results:\n                results[key] = results[key][keep]\n\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n               f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n               f'keep_empty={self.keep_empty})'\n\n\n@TRANSFORMS.register_module()\nclass LoadEmptyAnnotations(BaseTransform):\n    \"\"\"Load Empty Annotations for unlabeled images.\n\n    Added Keys:\n    - gt_bboxes (np.float32)\n    - gt_bboxes_labels (np.int64)\n    - gt_masks (BitmapMasks | PolygonMasks)\n    - gt_seg_map (np.uint8)\n    - gt_ignore_flags (bool)\n\n    Args:\n        with_bbox (bool): Whether to load the pseudo bbox annotation.\n            Defaults to True.\n        with_label (bool): Whether to load the pseudo label annotation.\n            Defaults to True.\n        with_mask (bool): Whether to load the pseudo mask annotation.\n             Default: False.\n        with_seg (bool): Whether to load the pseudo semantic segmentation\n            annotation. Defaults to False.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. 
Defaults to 255.\n    \"\"\"\n\n    def __init__(self,\n                 with_bbox: bool = True,\n                 with_label: bool = True,\n                 with_mask: bool = False,\n                 with_seg: bool = False,\n                 seg_ignore_label: int = 255) -> None:\n        self.with_bbox = with_bbox\n        self.with_label = with_label\n        self.with_mask = with_mask\n        self.with_seg = with_seg\n        self.seg_ignore_label = seg_ignore_label\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to load empty annotations.\n\n        Args:\n            results (dict): Result dict.\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        if self.with_bbox:\n            results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)\n            results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)\n        if self.with_label:\n            results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)\n        if self.with_mask:\n            # TODO: support PolygonMasks\n            h, w = results['img_shape']\n            gt_masks = np.zeros((0, h, w), dtype=np.uint8)\n            results['gt_masks'] = BitmapMasks(gt_masks, h, w)\n        if self.with_seg:\n            h, w = results['img_shape']\n            results['gt_seg_map'] = self.seg_ignore_label * np.ones(\n                (h, w), dtype=np.uint8)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(with_bbox={self.with_bbox}, '\n        repr_str += f'with_label={self.with_label}, '\n        repr_str += f'with_mask={self.with_mask}, '\n        repr_str += f'with_seg={self.with_seg}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass InferencerLoader(BaseTransform):\n    \"\"\"Load an image from ``results['img']``.\n\n    Similar with :obj:`LoadImageFromFile`, but the image has been loaded as\n    :obj:`np.ndarray` in ``results['img']``. Can be used when loading image\n    from webcam.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n    - img_path\n    - img_shape\n    - ori_shape\n\n    Args:\n        to_float32 (bool): Whether to convert the loaded image to a float32\n            numpy array. If set to False, the loaded image is an uint8 array.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__()\n        self.from_file = TRANSFORMS.build(\n            dict(type='LoadImageFromFile', **kwargs))\n        self.from_ndarray = TRANSFORMS.build(\n            dict(type='mmdet.LoadImageFromNDArray', **kwargs))\n\n    def transform(self, results: Union[str, np.ndarray, dict]) -> dict:\n        \"\"\"Transform function to add image meta information.\n\n        Args:\n            results (str, np.ndarray or dict): The result.\n\n        Returns:\n            dict: The dict contains loaded image and meta information.\n        \"\"\"\n        if isinstance(results, str):\n            inputs = dict(img_path=results)\n        elif isinstance(results, np.ndarray):\n            inputs = dict(img=results)\n        elif isinstance(results, dict):\n            inputs = results\n        else:\n            raise NotImplementedError\n\n        if 'img' in inputs:\n            return self.from_ndarray(inputs)\n        return self.from_file(inputs)\n"
  },
  {
    "path": "mmdet/datasets/transforms/transforms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport inspect\nimport math\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport cv2\nimport mmcv\nimport numpy as np\nfrom mmcv.image.geometric import _scale_size\nfrom mmcv.transforms import BaseTransform\nfrom mmcv.transforms import Pad as MMCV_Pad\nfrom mmcv.transforms import RandomFlip as MMCV_RandomFlip\nfrom mmcv.transforms import Resize as MMCV_Resize\nfrom mmcv.transforms.utils import avoid_cache_randomness, cache_randomness\nfrom mmengine.dataset import BaseDataset\nfrom mmengine.utils import is_str\nfrom numpy import random\n\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.structures.bbox import HorizontalBoxes, autocast_box_type\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\nfrom mmdet.utils import log_img_scale\n\ntry:\n    from imagecorruptions import corrupt\nexcept ImportError:\n    corrupt = None\n\ntry:\n    import albumentations\n    from albumentations import Compose\nexcept ImportError:\n    albumentations = None\n    Compose = None\n\nNumber = Union[int, float]\n\n\n@TRANSFORMS.register_module()\nclass Resize(MMCV_Resize):\n    \"\"\"Resize images & bbox & seg.\n\n    This transform resizes the input image according to ``scale`` or\n    ``scale_factor``. Bboxes, masks, and seg map are then resized\n    with the same scale factor.\n    if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n    resize.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n\n    Added Keys:\n\n    - scale\n    - scale_factor\n    - keep_ratio\n    - homography_matrix\n\n    Args:\n        scale (int or tuple): Images scales for resizing. Defaults to None\n        scale_factor (float or tuple[float]): Scale factors for resizing.\n            Defaults to None.\n        keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n            image. Defaults to False.\n        clip_object_border (bool): Whether to clip the objects\n            outside the border of the image. In some dataset like MOT17, the gt\n            bboxes are allowed to cross the border of images. Therefore, we\n            don't need to clip the gt bboxes in these cases. Defaults to True.\n        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n            These two backends generates slightly different results. Defaults\n            to 'cv2'.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. 
Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def _resize_masks(self, results: dict) -> None:\n        \"\"\"Resize masks with ``results['scale']``\"\"\"\n        if results.get('gt_masks', None) is not None:\n            if self.keep_ratio:\n                results['gt_masks'] = results['gt_masks'].rescale(\n                    results['scale'])\n            else:\n                results['gt_masks'] = results['gt_masks'].resize(\n                    results['img_shape'])\n\n    def _resize_bboxes(self, results: dict) -> None:\n        \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n        if results.get('gt_bboxes', None) is not None:\n            results['gt_bboxes'].rescale_(results['scale_factor'])\n            if self.clip_object_border:\n                results['gt_bboxes'].clip_(results['img_shape'])\n\n    def _resize_seg(self, results: dict) -> None:\n        \"\"\"Resize semantic segmentation map with ``results['scale']``.\"\"\"\n        if results.get('gt_seg_map', None) is not None:\n            if self.keep_ratio:\n                gt_seg = mmcv.imrescale(\n                    results['gt_seg_map'],\n                    results['scale'],\n                    interpolation='nearest',\n                    backend=self.backend)\n            else:\n                gt_seg = mmcv.imresize(\n                    results['gt_seg_map'],\n                    results['scale'],\n                    interpolation='nearest',\n                    backend=self.backend)\n            results['gt_seg_map'] = gt_seg\n\n    def _record_homography_matrix(self, results: dict) -> None:\n        \"\"\"Record the homography matrix for the Resize.\"\"\"\n        w_scale, h_scale = results['scale_factor']\n        homography_matrix = np.array(\n            [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n        if results.get('homography_matrix', None) is None:\n            results['homography_matrix'] = homography_matrix\n        else:\n            results['homography_matrix'] = homography_matrix @ results[\n                'homography_matrix']\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to resize images, bounding boxes and semantic\n        segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n        Returns:\n            dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n            'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n            are updated in result dict.\n        \"\"\"\n        if self.scale:\n            results['scale'] = self.scale\n        else:\n            img_shape = results['img'].shape[:2]\n            results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n        self._resize_img(results)\n        self._resize_bboxes(results)\n        self._resize_masks(results)\n        self._resize_seg(results)\n        self._record_homography_matrix(results)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(scale={self.scale}, '\n        repr_str += f'scale_factor={self.scale_factor}, '\n        repr_str += f'keep_ratio={self.keep_ratio}, '\n        repr_str += f'clip_object_border={self.clip_object_border}), '\n        repr_str += f'backend={self.backend}), '\n        repr_str += f'interpolation={self.interpolation})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass FixShapeResize(Resize):\n    
\"\"\"Resize images & bbox & seg to the specified size.\n\n    This transform resizes the input image according to ``width`` and\n    ``height``. Bboxes, masks, and seg map are then resized\n    with the same parameters.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n\n    Added Keys:\n\n    - scale\n    - scale_factor\n    - keep_ratio\n    - homography_matrix\n\n    Args:\n        width (int): width for resizing.\n        height (int): height for resizing.\n            Defaults to None.\n        pad_val (Number | dict[str, Number], optional): Padding value for if\n            the pad_mode is \"constant\".  If it is a single number, the value\n            to pad the image is the number and to pad the semantic\n            segmentation map is 255. If it is a dict, it should have the\n            following keys:\n\n            - img: The value to pad the image.\n            - seg: The value to pad the semantic segmentation map.\n            Defaults to dict(img=0, seg=255).\n        keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n            image. Defaults to False.\n        clip_object_border (bool): Whether to clip the objects\n            outside the border of the image. In some dataset like MOT17, the gt\n            bboxes are allowed to cross the border of images. Therefore, we\n            don't need to clip the gt bboxes in these cases. Defaults to True.\n        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n            These two backends generates slightly different results. Defaults\n            to 'cv2'.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend. 
Defaults\n            to 'bilinear'.\n    \"\"\"\n\n    def __init__(self,\n                 width: int,\n                 height: int,\n                 pad_val: Union[Number, dict] = dict(img=0, seg=255),\n                 keep_ratio: bool = False,\n                 clip_object_border: bool = True,\n                 backend: str = 'cv2',\n                 interpolation: str = 'bilinear') -> None:\n        assert width is not None and height is not None, (\n            '`width` and'\n            '`height` can not be `None`')\n\n        self.width = width\n        self.height = height\n        self.scale = (width, height)\n\n        self.backend = backend\n        self.interpolation = interpolation\n        self.keep_ratio = keep_ratio\n        self.clip_object_border = clip_object_border\n\n        if keep_ratio is True:\n            # padding to the fixed size when keep_ratio=True\n            self.pad_transform = Pad(size=self.scale, pad_val=pad_val)\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to resize images, bounding boxes and semantic\n        segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n        Returns:\n            dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n            'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n            are updated in result dict.\n        \"\"\"\n        img = results['img']\n        h, w = img.shape[:2]\n        if self.keep_ratio:\n            scale_factor = min(self.width / w, self.height / h)\n            results['scale_factor'] = (scale_factor, scale_factor)\n            real_w, real_h = int(w * float(scale_factor) +\n                                 0.5), int(h * float(scale_factor) + 0.5)\n            img, scale_factor = mmcv.imrescale(\n                results['img'], (real_w, real_h),\n                interpolation=self.interpolation,\n                return_scale=True,\n                backend=self.backend)\n            # the w_scale and h_scale has minor difference\n            # a real fix should be done in the mmcv.imrescale in the future\n            results['img'] = img\n            results['img_shape'] = img.shape[:2]\n            results['keep_ratio'] = self.keep_ratio\n            results['scale'] = (real_w, real_h)\n        else:\n            results['scale'] = (self.width, self.height)\n            results['scale_factor'] = (self.width / w, self.height / h)\n            super()._resize_img(results)\n\n        self._resize_bboxes(results)\n        self._resize_masks(results)\n        self._resize_seg(results)\n        self._record_homography_matrix(results)\n        if self.keep_ratio:\n            self.pad_transform(results)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(width={self.width}, height={self.height}, '\n        repr_str += f'keep_ratio={self.keep_ratio}, '\n        repr_str += f'clip_object_border={self.clip_object_border}), '\n        repr_str += f'backend={self.backend}), '\n        repr_str += f'interpolation={self.interpolation})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass RandomFlip(MMCV_RandomFlip):\n    \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n    flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
\n\n\n@TRANSFORMS.register_module()\nclass RandomFlip(MMCV_RandomFlip):\n    \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n    flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip\n    modes:\n\n     - ``prob`` is float, ``direction`` is string: the image will be\n         ``direction``ly flipped with probability of ``prob``.\n         E.g., ``prob=0.5``, ``direction='horizontal'``,\n         then image will be horizontally flipped with probability of 0.5.\n     - ``prob`` is float, ``direction`` is list of string: the image will\n         be ``direction[i]``ly flipped with probability of\n         ``prob/len(direction)``.\n         E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n         then image will be horizontally flipped with probability of 0.25,\n         vertically with probability of 0.25.\n     - ``prob`` is list of float, ``direction`` is list of string:\n         given ``len(prob) == len(direction)``, the image will\n         be ``direction[i]``ly flipped with probability of ``prob[i]``.\n         E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n         'vertical']``, then image will be horizontally flipped with\n         probability of 0.3, vertically with probability of 0.5.\n\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - flip\n    - flip_direction\n    - homography_matrix\n\n\n    Args:\n         prob (float | list[float], optional): The flipping probability.\n             Defaults to None.\n         direction(str | list[str]): The flipping direction. Options are\n             'horizontal', 'vertical' and 'diagonal'. If input is a list, the\n             length must equal ``prob``. Each element in ``prob`` indicates\n             the flip probability of the corresponding direction. 
Defaults to 'horizontal'.\n    \"\"\"\n\n    def _record_homography_matrix(self, results: dict) -> None:\n        \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n        cur_dir = results['flip_direction']\n        h, w = results['img'].shape[:2]\n\n        if cur_dir == 'horizontal':\n            homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n                                         dtype=np.float32)\n        elif cur_dir == 'vertical':\n            homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n                                         dtype=np.float32)\n        elif cur_dir == 'diagonal':\n            homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n                                         dtype=np.float32)\n        else:\n            homography_matrix = np.eye(3, dtype=np.float32)\n\n        if results.get('homography_matrix', None) is None:\n            results['homography_matrix'] = homography_matrix\n        else:\n            results['homography_matrix'] = homography_matrix @ results[\n                'homography_matrix']\n\n    @autocast_box_type()\n    def _flip(self, results: dict) -> None:\n        \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n        # flip image\n        results['img'] = mmcv.imflip(\n            results['img'], direction=results['flip_direction'])\n\n        img_shape = results['img'].shape[:2]\n\n        # flip bboxes\n        if results.get('gt_bboxes', None) is not None:\n            results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n        # flip masks\n        if results.get('gt_masks', None) is not None:\n            results['gt_masks'] = results['gt_masks'].flip(\n                results['flip_direction'])\n\n        # flip segs\n        if results.get('gt_seg_map', None) is not None:\n            results['gt_seg_map'] = mmcv.imflip(\n                results['gt_seg_map'], direction=results['flip_direction'])\n\n        # record homography matrix for flip\n        self._record_homography_matrix(results)\n\n\n@TRANSFORMS.register_module()\nclass RandomShift(BaseTransform):\n    \"\"\"Shift the image and box given shift pixels and probability.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32])\n    - gt_bboxes_labels (np.int64)\n    - gt_ignore_flags (bool) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes\n    - gt_bboxes_labels\n    - gt_ignore_flags (bool) (optional)\n\n    Args:\n        prob (float): Probability of shifts. Defaults to 0.5.\n        max_shift_px (int): The max pixels for shifting. Defaults to 32.\n        filter_thr_px (int): The width and height threshold for filtering.\n            The bbox and the rest of the targets below the width and\n            height threshold will be filtered. 
Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 prob: float = 0.5,\n                 max_shift_px: int = 32,\n                 filter_thr_px: int = 1) -> None:\n        assert 0 <= prob <= 1\n        assert max_shift_px >= 0\n        self.prob = prob\n        self.max_shift_px = max_shift_px\n        self.filter_thr_px = int(filter_thr_px)\n\n    @cache_randomness\n    def _random_prob(self) -> float:\n        return random.uniform(0, 1)\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to random shift images, bounding boxes.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Shift results.\n        \"\"\"\n        if self._random_prob() < self.prob:\n            img_shape = results['img'].shape[:2]\n\n            random_shift_x = random.randint(-self.max_shift_px,\n                                            self.max_shift_px)\n            random_shift_y = random.randint(-self.max_shift_px,\n                                            self.max_shift_px)\n            new_x = max(0, random_shift_x)\n            ori_x = max(0, -random_shift_x)\n            new_y = max(0, random_shift_y)\n            ori_y = max(0, -random_shift_y)\n\n            # TODO: support mask and semantic segmentation maps.\n            bboxes = results['gt_bboxes'].clone()\n            bboxes.translate_([random_shift_x, random_shift_y])\n\n            # clip border\n            bboxes.clip_(img_shape)\n\n            # remove invalid bboxes\n            valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & (\n                bboxes.heights > self.filter_thr_px).numpy()\n            # If the shift does not contain any gt-bbox area, skip this\n            # image.\n            if not valid_inds.any():\n                return results\n            bboxes = bboxes[valid_inds]\n            results['gt_bboxes'] = bboxes\n            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][\n                valid_inds]\n\n            if results.get('gt_ignore_flags', None) is not None:\n                results['gt_ignore_flags'] = \\\n                    results['gt_ignore_flags'][valid_inds]\n\n            # shift img\n            img = results['img']\n            new_img = np.zeros_like(img)\n            img_h, img_w = img.shape[:2]\n            new_h = img_h - np.abs(random_shift_y)\n            new_w = img_w - np.abs(random_shift_x)\n            new_img[new_y:new_y + new_h, new_x:new_x + new_w] \\\n                = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w]\n            results['img'] = new_img\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(prob={self.prob}, '\n        repr_str += f'max_shift_px={self.max_shift_px}, '\n        repr_str += f'filter_thr_px={self.filter_thr_px})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass Pad(MMCV_Pad):\n    \"\"\"Pad the image & segmentation map.\n\n    There are three padding modes: (1) pad to a fixed size and (2) pad to the\n    minimum size that is divisible by some number. and (3)pad to square. 
Also,\n    pad to square and pad to the minimum size can be used as the same time.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_masks\n    - gt_seg_map\n\n    Added Keys:\n\n    - pad_shape\n    - pad_fixed_size\n    - pad_size_divisor\n\n    Args:\n        size (tuple, optional): Fixed padding size.\n            Expected padding shape (width, height). Defaults to None.\n        size_divisor (int, optional): The divisor of padded size. Defaults to\n            None.\n        pad_to_square (bool): Whether to pad the image into a square.\n            Currently only used for YOLOX. Defaults to False.\n        pad_val (Number | dict[str, Number], optional) - Padding value for if\n            the pad_mode is \"constant\".  If it is a single number, the value\n            to pad the image is the number and to pad the semantic\n            segmentation map is 255. If it is a dict, it should have the\n            following keys:\n\n            - img: The value to pad the image.\n            - seg: The value to pad the semantic segmentation map.\n            Defaults to dict(img=0, seg=255).\n        padding_mode (str): Type of padding. Should be: constant, edge,\n            reflect or symmetric. Defaults to 'constant'.\n\n            - constant: pads with a constant value, this value is specified\n              with pad_val.\n            - edge: pads with the last value at the edge of the image.\n            - reflect: pads with reflection of image without repeating the last\n              value on the edge. For example, padding [1, 2, 3, 4] with 2\n              elements on both sides in reflect mode will result in\n              [3, 2, 1, 2, 3, 4, 3, 2].\n            - symmetric: pads with reflection of image repeating the last value\n              on the edge. 
For example, padding [1, 2, 3, 4] with 2 elements on\n              both sides in symmetric mode will result in\n              [2, 1, 1, 2, 3, 4, 4, 3]\n    \"\"\"\n\n    def _pad_masks(self, results: dict) -> None:\n        \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n        if results.get('gt_masks', None) is not None:\n            pad_val = self.pad_val.get('masks', 0)\n            pad_shape = results['pad_shape'][:2]\n            results['gt_masks'] = results['gt_masks'].pad(\n                pad_shape, pad_val=pad_val)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        self._pad_img(results)\n        self._pad_seg(results)\n        self._pad_masks(results)\n        return results\n\n\n@TRANSFORMS.register_module()\nclass RandomCrop(BaseTransform):\n    \"\"\"Random crop the image & bboxes & masks.\n\n    The absolute ``crop_size`` is sampled based on ``crop_type`` and\n    ``image_size``, then the cropped results are generated.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_masks (optional)\n    - gt_ignore_flags (optional)\n    - gt_seg_map (optional)\n\n    Added Keys:\n\n    - homography_matrix\n\n    Args:\n        crop_size (tuple): The relative ratio or absolute pixels of\n            (width, height).\n        crop_type (str, optional): One of \"relative_range\", \"relative\",\n            \"absolute\", \"absolute_range\". \"relative\" randomly crops\n            (h * crop_size[0], w * crop_size[1]) part from an input of size\n            (h, w). \"relative_range\" uniformly samples relative crop size from\n            range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n            respectively. \"absolute\" crops from an input with absolute size\n            (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n            crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n            in range [crop_size[0], min(w, crop_size[1])].\n            Defaults to \"absolute\".\n        allow_negative_crop (bool, optional): Whether to allow a crop that does\n            not contain any bbox area. Defaults to False.\n        recompute_bbox (bool, optional): Whether to re-compute the boxes based\n            on cropped instance masks. Defaults to False.\n        bbox_clip_border (bool, optional): Whether clip the objects outside\n            the border of the image. Defaults to True.\n\n    Note:\n        - If the image is smaller than the absolute crop size, return the\n            original image.\n        - The keys for bboxes, labels and masks must be aligned. 
That is,\n          ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n          ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n          ``gt_masks_ignore``.\n        - If the crop does not contain any gt-bbox region and\n          ``allow_negative_crop`` is set to False, skip this image.\n    \"\"\"\n\n    def __init__(self,\n                 crop_size: tuple,\n                 crop_type: str = 'absolute',\n                 allow_negative_crop: bool = False,\n                 recompute_bbox: bool = False,\n                 bbox_clip_border: bool = True) -> None:\n        if crop_type not in [\n                'relative_range', 'relative', 'absolute', 'absolute_range'\n        ]:\n            raise ValueError(f'Invalid crop_type {crop_type}.')\n        if crop_type in ['absolute', 'absolute_range']:\n            assert crop_size[0] > 0 and crop_size[1] > 0\n            assert isinstance(crop_size[0], int) and isinstance(\n                crop_size[1], int)\n            if crop_type == 'absolute_range':\n                assert crop_size[0] <= crop_size[1]\n        else:\n            assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n        self.crop_size = crop_size\n        self.crop_type = crop_type\n        self.allow_negative_crop = allow_negative_crop\n        self.bbox_clip_border = bbox_clip_border\n        self.recompute_bbox = recompute_bbox\n\n    def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n                   allow_negative_crop: bool) -> Union[dict, None]:\n        \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n        segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n            crop_size (Tuple[int, int]): Expected absolute size after\n                cropping, (h, w).\n            allow_negative_crop (bool): Whether to allow a crop that does not\n                contain any bbox area.\n\n        Returns:\n            results (Union[dict, None]): Randomly cropped results, 'img_shape'\n                key in result dict is updated according to crop size. 
None will\n                be returned when there is no valid bbox after cropping.\n        \"\"\"\n        assert crop_size[0] > 0 and crop_size[1] > 0\n        img = results['img']\n        margin_h = max(img.shape[0] - crop_size[0], 0)\n        margin_w = max(img.shape[1] - crop_size[1], 0)\n        offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n        crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n        crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n        # Record the homography matrix for the RandomCrop\n        homography_matrix = np.array(\n            [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n            dtype=np.float32)\n        if results.get('homography_matrix', None) is None:\n            results['homography_matrix'] = homography_matrix\n        else:\n            results['homography_matrix'] = homography_matrix @ results[\n                'homography_matrix']\n\n        # crop the image\n        img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n        img_shape = img.shape\n        results['img'] = img\n        results['img_shape'] = img_shape\n\n        # crop bboxes accordingly and clip to the image boundary\n        if results.get('gt_bboxes', None) is not None:\n            bboxes = results['gt_bboxes']\n            bboxes.translate_([-offset_w, -offset_h])\n            if self.bbox_clip_border:\n                bboxes.clip_(img_shape[:2])\n            valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n            # If the crop does not contain any gt-bbox area and\n            # allow_negative_crop is False, skip this image.\n            if (not valid_inds.any() and not allow_negative_crop):\n                return None\n\n            results['gt_bboxes'] = bboxes[valid_inds]\n\n            if results.get('gt_ignore_flags', None) is not None:\n                results['gt_ignore_flags'] = \\\n                    results['gt_ignore_flags'][valid_inds]\n\n            if results.get('gt_bboxes_labels', None) is not None:\n                results['gt_bboxes_labels'] = \\\n                    results['gt_bboxes_labels'][valid_inds]\n\n            if results.get('gt_masks', None) is not None:\n                results['gt_masks'] = results['gt_masks'][\n                    valid_inds.nonzero()[0]].crop(\n                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n                if self.recompute_bbox:\n                    results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n                        type(results['gt_bboxes']))\n\n        # crop semantic seg\n        if results.get('gt_seg_map', None) is not None:\n            results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n                                                          crop_x1:crop_x2]\n\n        return results\n\n    @cache_randomness\n    def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n        \"\"\"Randomly generate crop offset.\n\n        Args:\n            margin (Tuple[int, int]): The upper bound for the offset generated\n                randomly.\n\n        Returns:\n            Tuple[int, int]: The random offset for the crop.\n        \"\"\"\n        margin_h, margin_w = margin\n        offset_h = np.random.randint(0, margin_h + 1)\n        offset_w = np.random.randint(0, margin_w + 1)\n\n        return offset_h, offset_w\n\n    @cache_randomness\n    def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n        \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n   
     `image_size`.\n\n        Args:\n            image_size (Tuple[int, int]): (h, w).\n\n        Returns:\n            crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n        \"\"\"\n        h, w = image_size\n        if self.crop_type == 'absolute':\n            return min(self.crop_size[1], h), min(self.crop_size[0], w)\n        elif self.crop_type == 'absolute_range':\n            crop_h = np.random.randint(\n                min(h, self.crop_size[0]),\n                min(h, self.crop_size[1]) + 1)\n            crop_w = np.random.randint(\n                min(w, self.crop_size[0]),\n                min(w, self.crop_size[1]) + 1)\n            return crop_h, crop_w\n        elif self.crop_type == 'relative':\n            crop_w, crop_h = self.crop_size\n            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n        else:\n            # 'relative_range'\n            crop_size = np.asarray(self.crop_size, dtype=np.float32)\n            crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> Union[dict, None]:\n        \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n        semantic segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            results (Union[dict, None]): Randomly cropped results, 'img_shape'\n                key in result dict is updated according to crop size. None will\n                be returned when there is no valid bbox after cropping.\n        \"\"\"\n        image_size = results['img'].shape[:2]\n        crop_size = self._get_crop_size(image_size)\n        results = self._crop_data(results, crop_size, self.allow_negative_crop)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(crop_size={self.crop_size}, '\n        repr_str += f'crop_type={self.crop_type}, '\n        repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n        repr_str += f'recompute_bbox={self.recompute_bbox}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass SegRescale(BaseTransform):\n    \"\"\"Rescale semantic segmentation maps.\n\n    This transform rescale the ``gt_seg_map`` according to ``scale_factor``.\n\n    Required Keys:\n\n    - gt_seg_map\n\n    Modified Keys:\n\n    - gt_seg_map\n\n    Args:\n        scale_factor (float): The scale factor of the final output. Defaults\n            to 1.\n        backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.\n            These two backends generates slightly different results. 
Defaults\n            to 'cv2'.\n    \"\"\"\n\n    def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None:\n        self.scale_factor = scale_factor\n        self.backend = backend\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to scale the semantic segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with semantic segmentation map scaled.\n        \"\"\"\n        if self.scale_factor != 1:\n            results['gt_seg_map'] = mmcv.imrescale(\n                results['gt_seg_map'],\n                self.scale_factor,\n                interpolation='nearest',\n                backend=self.backend)\n\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(scale_factor={self.scale_factor}, '\n        repr_str += f'backend={self.backend})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PhotoMetricDistortion(BaseTransform):\n    \"\"\"Apply photometric distortion to image sequentially, every transformation\n    is applied with a probability of 0.5. The position of random contrast is in\n    second or second to last.\n\n    1. random brightness\n    2. random contrast (mode 0)\n    3. convert color from BGR to HSV\n    4. random saturation\n    5. random hue\n    6. convert color from HSV to BGR\n    7. random contrast (mode 1)\n    8. randomly swap channels\n\n    Required Keys:\n\n    - img (np.uint8)\n\n    Modified Keys:\n\n    - img (np.float32)\n\n    Args:\n        brightness_delta (int): delta of brightness.\n        contrast_range (sequence): range of contrast.\n        saturation_range (sequence): range of saturation.\n        hue_delta (int): delta of hue.\n    \"\"\"\n\n    def __init__(self,\n                 brightness_delta: int = 32,\n                 contrast_range: Sequence[Number] = (0.5, 1.5),\n                 saturation_range: Sequence[Number] = (0.5, 1.5),\n                 hue_delta: int = 18) -> None:\n        self.brightness_delta = brightness_delta\n        self.contrast_lower, self.contrast_upper = contrast_range\n        self.saturation_lower, self.saturation_upper = saturation_range\n        self.hue_delta = hue_delta\n\n    @cache_randomness\n    def _random_flags(self) -> Sequence[Number]:\n        mode = random.randint(2)\n        brightness_flag = random.randint(2)\n        contrast_flag = random.randint(2)\n        saturation_flag = random.randint(2)\n        hue_flag = random.randint(2)\n        swap_flag = random.randint(2)\n        delta_value = random.uniform(-self.brightness_delta,\n                                     self.brightness_delta)\n        alpha_value = random.uniform(self.contrast_lower, self.contrast_upper)\n        saturation_value = random.uniform(self.saturation_lower,\n                                          self.saturation_upper)\n        hue_value = random.uniform(-self.hue_delta, self.hue_delta)\n        swap_value = random.permutation(3)\n\n        return (mode, brightness_flag, contrast_flag, saturation_flag,\n                hue_flag, swap_flag, delta_value, alpha_value,\n                saturation_value, hue_value, swap_value)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to perform photometric distortion on images.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with 
images distorted.\n        \"\"\"\n        assert 'img' in results, '`img` is not found in results'\n        img = results['img']\n        img = img.astype(np.float32)\n\n        (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag,\n         swap_flag, delta_value, alpha_value, saturation_value, hue_value,\n         swap_value) = self._random_flags()\n\n        # random brightness\n        if brightness_flag:\n            img += delta_value\n\n        # mode == 0 --> do random contrast first\n        # mode == 1 --> do random contrast last\n        if mode == 1:\n            if contrast_flag:\n                img *= alpha_value\n\n        # convert color from BGR to HSV\n        img = mmcv.bgr2hsv(img)\n\n        # random saturation\n        if saturation_flag:\n            img[..., 1] *= saturation_value\n            # For image(type=float32), after convert bgr to hsv by opencv,\n            # valid saturation value range is [0, 1]\n            if saturation_value > 1:\n                img[..., 1] = img[..., 1].clip(0, 1)\n\n        # random hue\n        if hue_flag:\n            img[..., 0] += hue_value\n            img[..., 0][img[..., 0] > 360] -= 360\n            img[..., 0][img[..., 0] < 0] += 360\n\n        # convert color from HSV to BGR\n        img = mmcv.hsv2bgr(img)\n\n        # random contrast\n        if mode == 0:\n            if contrast_flag:\n                img *= alpha_value\n\n        # randomly swap channels\n        if swap_flag:\n            img = img[..., swap_value]\n\n        results['img'] = img\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(brightness_delta={self.brightness_delta}, '\n        repr_str += 'contrast_range='\n        repr_str += f'{(self.contrast_lower, self.contrast_upper)}, '\n        repr_str += 'saturation_range='\n        repr_str += f'{(self.saturation_lower, self.saturation_upper)}, '\n        repr_str += f'hue_delta={self.hue_delta})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass Expand(BaseTransform):\n    \"\"\"Random expand the image & bboxes & masks & segmentation map.\n\n    Randomly place the original image on a canvas of ``ratio`` x original image\n    size filled with mean values. 
The ratio is in the range of ratio_range.\n\n    Required Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_masks\n    - gt_seg_map\n\n\n    Args:\n        mean (sequence): mean value of dataset.\n        to_rgb (bool): if need to convert the order of mean to align with RGB.\n        ratio_range (sequence)): range of expand ratio.\n        seg_ignore_label (int): label of ignore segmentation map.\n        prob (float): probability of applying this transformation\n    \"\"\"\n\n    def __init__(self,\n                 mean: Sequence[Number] = (0, 0, 0),\n                 to_rgb: bool = True,\n                 ratio_range: Sequence[Number] = (1, 4),\n                 seg_ignore_label: int = None,\n                 prob: float = 0.5) -> None:\n        self.to_rgb = to_rgb\n        self.ratio_range = ratio_range\n        if to_rgb:\n            self.mean = mean[::-1]\n        else:\n            self.mean = mean\n        self.min_ratio, self.max_ratio = ratio_range\n        self.seg_ignore_label = seg_ignore_label\n        self.prob = prob\n\n    @cache_randomness\n    def _random_prob(self) -> float:\n        return random.uniform(0, 1)\n\n    @cache_randomness\n    def _random_ratio(self) -> float:\n        return random.uniform(self.min_ratio, self.max_ratio)\n\n    @cache_randomness\n    def _random_left_top(self, ratio: float, h: int,\n                         w: int) -> Tuple[int, int]:\n        left = int(random.uniform(0, w * ratio - w))\n        top = int(random.uniform(0, h * ratio - h))\n        return left, top\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to expand images, bounding boxes, masks,\n        segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images, bounding boxes, masks, segmentation\n                map expanded.\n        \"\"\"\n        if self._random_prob() > self.prob:\n            return results\n        assert 'img' in results, '`img` is not found in results'\n        img = results['img']\n        h, w, c = img.shape\n        ratio = self._random_ratio()\n        # speedup expand when meets large image\n        if np.all(self.mean == self.mean[0]):\n            expand_img = np.empty((int(h * ratio), int(w * ratio), c),\n                                  img.dtype)\n            expand_img.fill(self.mean[0])\n        else:\n            expand_img = np.full((int(h * ratio), int(w * ratio), c),\n                                 self.mean,\n                                 dtype=img.dtype)\n        left, top = self._random_left_top(ratio, h, w)\n        expand_img[top:top + h, left:left + w] = img\n        results['img'] = expand_img\n        results['img_shape'] = expand_img.shape[:2]\n\n        # expand bboxes\n        if results.get('gt_bboxes', None) is not None:\n            results['gt_bboxes'].translate_([left, top])\n\n        # expand masks\n        if results.get('gt_masks', None) is not None:\n            results['gt_masks'] = results['gt_masks'].expand(\n                int(h * ratio), int(w * ratio), top, left)\n\n        # expand segmentation map\n        if results.get('gt_seg_map', None) is not None:\n            gt_seg = results['gt_seg_map']\n            expand_gt_seg = 
np.full((int(h * ratio), int(w * ratio)),\n                                    self.seg_ignore_label,\n                                    dtype=gt_seg.dtype)\n            expand_gt_seg[top:top + h, left:left + w] = gt_seg\n            results['gt_seg_map'] = expand_gt_seg\n\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass MinIoURandomCrop(BaseTransform):\n    \"\"\"Random crop the image & bboxes & masks & segmentation map, the cropped\n    patches have minimum IoU requirement with original image & bboxes & masks.\n\n    & segmentation map, the IoU threshold is randomly selected from min_ious.\n\n\n    Required Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_seg_map (np.uint8) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes\n    - gt_bboxes_labels\n    - gt_masks\n    - gt_ignore_flags\n    - gt_seg_map\n\n\n    Args:\n        min_ious (Sequence[float]): minimum IoU threshold for all intersections\n            with bounding boxes.\n        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,\n        where a >= min_crop_size).\n        bbox_clip_border (bool, optional): Whether clip the objects outside\n            the border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9),\n                 min_crop_size: float = 0.3,\n                 bbox_clip_border: bool = True) -> None:\n\n        self.min_ious = min_ious\n        self.sample_mode = (1, *min_ious, 0)\n        self.min_crop_size = min_crop_size\n        self.bbox_clip_border = bbox_clip_border\n\n    @cache_randomness\n    def _random_mode(self) -> Number:\n        return random.choice(self.sample_mode)\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to crop images and bounding boxes with minimum\n        IoU constraint.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images and bounding boxes cropped, \\\n                'img_shape' key is updated.\n        \"\"\"\n        assert 'img' in results, '`img` is not found in results'\n        assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results'\n        img = results['img']\n        boxes = results['gt_bboxes']\n        h, w, c = img.shape\n        while True:\n            mode = self._random_mode()\n            self.mode = mode\n            if mode == 1:\n                return results\n\n            min_iou = self.mode\n            for i in range(50):\n                new_w = random.uniform(self.min_crop_size * w, w)\n                new_h = random.uniform(self.min_crop_size * h, h)\n\n                # h / w in [0.5, 2]\n                if new_h / new_w < 0.5 or new_h / new_w > 2:\n                    continue\n\n                left = random.uniform(w - new_w)\n                top = random.uniform(h - new_h)\n\n                patch = np.array(\n 
                   (int(left), int(top), int(left + new_w), int(top + new_h)))\n                # Line or point crop is not allowed\n                if patch[2] == patch[0] or patch[3] == patch[1]:\n                    continue\n                overlaps = boxes.overlaps(\n                    HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)),\n                    boxes).numpy().reshape(-1)\n                if len(overlaps) > 0 and overlaps.min() < min_iou:\n                    continue\n\n                # center of boxes should inside the crop img\n                # only adjust boxes and instance masks when the gt is not empty\n                if len(overlaps) > 0:\n                    # adjust boxes\n                    def is_center_of_bboxes_in_patch(boxes, patch):\n                        centers = boxes.centers.numpy()\n                        mask = ((centers[:, 0] > patch[0]) *\n                                (centers[:, 1] > patch[1]) *\n                                (centers[:, 0] < patch[2]) *\n                                (centers[:, 1] < patch[3]))\n                        return mask\n\n                    mask = is_center_of_bboxes_in_patch(boxes, patch)\n                    if not mask.any():\n                        continue\n                    if results.get('gt_bboxes', None) is not None:\n                        boxes = results['gt_bboxes']\n                        mask = is_center_of_bboxes_in_patch(boxes, patch)\n                        boxes = boxes[mask]\n                        boxes.translate_([-patch[0], -patch[1]])\n                        if self.bbox_clip_border:\n                            boxes.clip_(\n                                [patch[3] - patch[1], patch[2] - patch[0]])\n                        results['gt_bboxes'] = boxes\n\n                        # ignore_flags\n                        if results.get('gt_ignore_flags', None) is not None:\n                            results['gt_ignore_flags'] = \\\n                                results['gt_ignore_flags'][mask]\n\n                        # labels\n                        if results.get('gt_bboxes_labels', None) is not None:\n                            results['gt_bboxes_labels'] = results[\n                                'gt_bboxes_labels'][mask]\n\n                        # mask fields\n                        if results.get('gt_masks', None) is not None:\n                            results['gt_masks'] = results['gt_masks'][\n                                mask.nonzero()[0]].crop(patch)\n                # adjust the img no matter whether the gt is empty before crop\n                img = img[patch[1]:patch[3], patch[0]:patch[2]]\n                results['img'] = img\n                results['img_shape'] = img.shape[:2]\n\n                # seg fields\n                if results.get('gt_seg_map', None) is not None:\n                    results['gt_seg_map'] = results['gt_seg_map'][\n                        patch[1]:patch[3], patch[0]:patch[2]]\n                return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(min_ious={self.min_ious}, '\n        repr_str += f'min_crop_size={self.min_crop_size}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass Corrupt(BaseTransform):\n    \"\"\"Corruption augmentation.\n\n    Corruption transforms implemented based on\n    `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.\n\n    
Required Keys:\n\n    - img (np.uint8)\n\n\n    Modified Keys:\n\n    - img (np.uint8)\n\n\n    Args:\n        corruption (str): Corruption name.\n        severity (int): The severity of corruption. Defaults to 1.\n    \"\"\"\n\n    def __init__(self, corruption: str, severity: int = 1) -> None:\n        self.corruption = corruption\n        self.severity = severity\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Call function to corrupt image.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images corrupted.\n        \"\"\"\n\n        if corrupt is None:\n            raise RuntimeError('imagecorruptions is not installed')\n        results['img'] = corrupt(\n            results['img'].astype(np.uint8),\n            corruption_name=self.corruption,\n            severity=self.severity)\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(corruption={self.corruption}, '\n        repr_str += f'severity={self.severity})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\n@avoid_cache_randomness\nclass Albu(BaseTransform):\n    \"\"\"Albumentation augmentation.\n\n    Adds custom transformations from Albumentations library.\n    Please, visit `https://albumentations.readthedocs.io`\n    to get more information.\n\n    Required Keys:\n\n    - img (np.uint8)\n    - gt_bboxes (HorizontalBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n\n    Modified Keys:\n\n    - img (np.uint8)\n    - gt_bboxes (HorizontalBoxes[torch.float32]) (optional)\n    - gt_masks (BitmapMasks | PolygonMasks) (optional)\n    - img_shape (tuple)\n\n    An example of ``transforms`` is as followed:\n\n    .. code-block::\n\n        [\n            dict(\n                type='ShiftScaleRotate',\n                shift_limit=0.0625,\n                scale_limit=0.0,\n                rotate_limit=0,\n                interpolation=1,\n                p=0.5),\n            dict(\n                type='RandomBrightnessContrast',\n                brightness_limit=[0.1, 0.3],\n                contrast_limit=[0.1, 0.3],\n                p=0.2),\n            dict(type='ChannelShuffle', p=0.1),\n            dict(\n                type='OneOf',\n                transforms=[\n                    dict(type='Blur', blur_limit=3, p=1.0),\n                    dict(type='MedianBlur', blur_limit=3, p=1.0)\n                ],\n                p=0.1),\n        ]\n\n    Args:\n        transforms (list[dict]): A list of albu transformations\n        bbox_params (dict, optional): Bbox_params for albumentation `Compose`\n        keymap (dict, optional): Contains\n            {'input key':'albumentation-style key'}\n        skip_img_without_anno (bool): Whether to skip the image if no ann left\n            after aug. 
Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 transforms: List[dict],\n                 bbox_params: Optional[dict] = None,\n                 keymap: Optional[dict] = None,\n                 skip_img_without_anno: bool = False) -> None:\n        if Compose is None:\n            raise RuntimeError('albumentations is not installed')\n\n        # Args will be modified later, copying it will be safer\n        transforms = copy.deepcopy(transforms)\n        if bbox_params is not None:\n            bbox_params = copy.deepcopy(bbox_params)\n        if keymap is not None:\n            keymap = copy.deepcopy(keymap)\n        self.transforms = transforms\n        self.filter_lost_elements = False\n        self.skip_img_without_anno = skip_img_without_anno\n\n        # A simple workaround to remove masks without boxes\n        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params\n                and 'filter_lost_elements' in bbox_params):\n            self.filter_lost_elements = True\n            self.origin_label_fields = bbox_params['label_fields']\n            bbox_params['label_fields'] = ['idx_mapper']\n            del bbox_params['filter_lost_elements']\n\n        self.bbox_params = (\n            self.albu_builder(bbox_params) if bbox_params else None)\n        self.aug = Compose([self.albu_builder(t) for t in self.transforms],\n                           bbox_params=self.bbox_params)\n\n        if not keymap:\n            self.keymap_to_albu = {\n                'img': 'image',\n                'gt_masks': 'masks',\n                'gt_bboxes': 'bboxes'\n            }\n        else:\n            self.keymap_to_albu = keymap\n        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}\n\n    def albu_builder(self, cfg: dict) -> albumentations:\n        \"\"\"Import a module from albumentations.\n\n        It inherits some of :func:`build_from_cfg` logic.\n\n        Args:\n            cfg (dict): Config dict. It should at least contain the key \"type\".\n\n        Returns:\n            obj: The constructed object.\n        \"\"\"\n\n        assert isinstance(cfg, dict) and 'type' in cfg\n        args = cfg.copy()\n        obj_type = args.pop('type')\n        if is_str(obj_type):\n            if albumentations is None:\n                raise RuntimeError('albumentations is not installed')\n            obj_cls = getattr(albumentations, obj_type)\n        elif inspect.isclass(obj_type):\n            obj_cls = obj_type\n        else:\n            raise TypeError(\n                f'type must be a str or valid type, but got {type(obj_type)}')\n\n        if 'transforms' in args:\n            args['transforms'] = [\n                self.albu_builder(transform)\n                for transform in args['transforms']\n            ]\n\n        return obj_cls(**args)\n\n    @staticmethod\n    def mapper(d: dict, keymap: dict) -> dict:\n        \"\"\"Dictionary mapper. 
Renames keys according to keymap provided.\n\n        Args:\n            d (dict): old dict\n            keymap (dict): {'old_key':'new_key'}\n        Returns:\n            dict: new dict.\n        \"\"\"\n        updated_dict = {}\n        for k, v in zip(d.keys(), d.values()):\n            new_k = keymap.get(k, k)\n            updated_dict[new_k] = d[k]\n        return updated_dict\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> Union[dict, None]:\n        \"\"\"Transform function of Albu.\"\"\"\n        # TODO: gt_seg_map is not currently supported\n        # dict to albumentations format\n        results = self.mapper(results, self.keymap_to_albu)\n        results, ori_masks = self._preprocess_results(results)\n        results = self.aug(**results)\n        results = self._postprocess_results(results, ori_masks)\n        if results is None:\n            return None\n        # back to the original format\n        results = self.mapper(results, self.keymap_back)\n        results['img_shape'] = results['img'].shape\n        return results\n\n    def _preprocess_results(self, results: dict) -> tuple:\n        \"\"\"Pre-processing results to facilitate the use of Albu.\"\"\"\n        if 'bboxes' in results:\n            # to list of boxes\n            if not isinstance(results['bboxes'], HorizontalBoxes):\n                raise NotImplementedError(\n                    'Albu only supports horizontal boxes now')\n            bboxes = results['bboxes'].numpy()\n            results['bboxes'] = [x for x in bboxes]\n            # add pseudo-field for filtration\n            if self.filter_lost_elements:\n                results['idx_mapper'] = np.arange(len(results['bboxes']))\n\n        # TODO: Support mask structure in albu\n        ori_masks = None\n        if 'masks' in results:\n            if isinstance(results['masks'], PolygonMasks):\n                raise NotImplementedError(\n                    'Albu only supports BitMap masks now')\n            ori_masks = results['masks']\n            if albumentations.__version__ < '0.5':\n                results['masks'] = results['masks'].masks\n            else:\n                results['masks'] = [mask for mask in results['masks'].masks]\n\n        return results, ori_masks\n\n    def _postprocess_results(\n            self,\n            results: dict,\n            ori_masks: Optional[Union[BitmapMasks,\n                                      PolygonMasks]] = None) -> dict:\n        \"\"\"Post-processing Albu output.\"\"\"\n        # albumentations may return np.array or list on different versions\n        if 'gt_bboxes_labels' in results and isinstance(\n                results['gt_bboxes_labels'], list):\n            results['gt_bboxes_labels'] = np.array(\n                results['gt_bboxes_labels'], dtype=np.int64)\n        if 'gt_ignore_flags' in results and isinstance(\n                results['gt_ignore_flags'], list):\n            results['gt_ignore_flags'] = np.array(\n                results['gt_ignore_flags'], dtype=bool)\n\n        if 'bboxes' in results:\n            if isinstance(results['bboxes'], list):\n                results['bboxes'] = np.array(\n                    results['bboxes'], dtype=np.float32)\n            results['bboxes'] = results['bboxes'].reshape(-1, 4)\n            results['bboxes'] = HorizontalBoxes(results['bboxes'])\n\n            # filter label_fields\n            if self.filter_lost_elements:\n\n                for label in self.origin_label_fields:\n                    
results[label] = np.array(\n                        [results[label][i] for i in results['idx_mapper']])\n                if 'masks' in results:\n                    assert ori_masks is not None\n                    results['masks'] = np.array(\n                        [results['masks'][i] for i in results['idx_mapper']])\n                    results['masks'] = ori_masks.__class__(\n                        results['masks'], results['image'].shape[0],\n                        results['image'].shape[1])\n\n                if (not len(results['idx_mapper'])\n                        and self.skip_img_without_anno):\n                    return None\n            elif 'masks' in results:\n                results['masks'] = ori_masks.__class__(\n                    results['masks'], results['image'].shape[0],\n                    results['image'].shape[1])\n\n        return results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\n@avoid_cache_randomness\nclass RandomCenterCropPad(BaseTransform):\n    \"\"\"Random center crop and random around padding for CornerNet.\n\n    This operation generates randomly cropped image from the original image and\n    pads it simultaneously. Different from :class:`RandomCrop`, the output\n    shape may not equal to ``crop_size`` strictly. We choose a random value\n    from ``ratios`` and the output shape could be larger or smaller than\n    ``crop_size``. The padding operation is also different from :class:`Pad`,\n    here we use around padding instead of right-bottom padding.\n\n    The relation between output image (padding image) and original image:\n\n    .. code:: text\n\n                        output image\n\n               +----------------------------+\n               |          padded area       |\n        +------|----------------------------|----------+\n        |      |         cropped area       |          |\n        |      |         +---------------+  |          |\n        |      |         |    .   center |  |          | original image\n        |      |         |        range  |  |          |\n        |      |         +---------------+  |          |\n        +------|----------------------------|----------+\n               |          padded area       |\n               +----------------------------+\n\n    There are 5 main areas in the figure:\n\n    - output image: output image of this operation, also called padding\n      image in following instruction.\n    - original image: input image of this operation.\n    - padded area: non-intersect area of output image and original image.\n    - cropped area: the overlap of output image and original image.\n    - center range: a smaller area where random center chosen from.\n      center range is computed by ``border`` and original image's shape\n      to avoid our random center is too close to original image's border.\n\n    Also this operation act differently in train and test mode, the summary\n    pipeline is listed below.\n\n    Train pipeline:\n\n    1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image\n       will be ``random_ratio * crop_size``.\n    2. Choose a ``random_center`` in center range.\n    3. Generate padding image with center matches the ``random_center``.\n    4. Initialize the padding image with pixel value equals to ``mean``.\n    5. Copy the cropped area to padding image.\n    6. Refine annotations.\n\n    Test pipeline:\n\n    1. 
Compute output shape according to ``test_pad_mode``.\n    2. Generate padding image with center matches the original image\n       center.\n    3. Initialize the padding image with pixel value equals to ``mean``.\n    4. Copy the ``cropped area`` to padding image.\n\n    Required Keys:\n\n    - img (np.float32)\n    - img_shape (tuple)\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n\n    Modified Keys:\n\n    - img (np.float32)\n    - img_shape (tuple)\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n\n    Args:\n        crop_size (tuple, optional): Expected size after crop; the final size\n            will be computed according to ratio. Requires (width, height)\n            in train mode, and None in test mode.\n        ratios (tuple, optional): Randomly select a ratio from the tuple and\n            crop the image to (crop_size[0] * ratio) * (crop_size[1] * ratio).\n            Only available in train mode. Defaults to (0.9, 1.0, 1.1).\n        border (int, optional): Max distance from the center selection area\n            to the image border. Only available in train mode. Defaults\n            to 128.\n        mean (sequence, optional): Mean values of 3 channels.\n        std (sequence, optional): Std values of 3 channels.\n        to_rgb (bool, optional): Whether to convert the image from BGR to RGB.\n        test_mode (bool): Whether to involve random variables in the\n            transform. In train mode, crop_size is fixed, and center coords\n            and ratio are randomly selected from predefined lists. In test\n            mode, crop_size is the image's original shape, and center coords\n            and ratio are fixed. Defaults to False.\n        test_pad_mode (tuple, optional): Padding method and padding shape\n            value, only available in test mode. Default is using\n            'logical_or' with 127 as padding shape value.\n\n            - 'logical_or': final_shape = input_shape | padding_shape_value\n            - 'size_divisor': final_shape = int(\n              ceil(input_shape / padding_shape_value) * padding_shape_value)\n\n            Defaults to ('logical_or', 127).\n        test_pad_add_pix (int): Extra padding pixel in test mode.\n            Defaults to 0.\n        bbox_clip_border (bool): Whether to clip the objects outside\n            the border of the image. 
Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 crop_size: Optional[tuple] = None,\n                 ratios: Optional[tuple] = (0.9, 1.0, 1.1),\n                 border: Optional[int] = 128,\n                 mean: Optional[Sequence] = None,\n                 std: Optional[Sequence] = None,\n                 to_rgb: Optional[bool] = None,\n                 test_mode: bool = False,\n                 test_pad_mode: Optional[tuple] = ('logical_or', 127),\n                 test_pad_add_pix: int = 0,\n                 bbox_clip_border: bool = True) -> None:\n        if test_mode:\n            assert crop_size is None, 'crop_size must be None in test mode'\n            assert ratios is None, 'ratios must be None in test mode'\n            assert border is None, 'border must be None in test mode'\n            assert isinstance(test_pad_mode, (list, tuple))\n            assert test_pad_mode[0] in ['logical_or', 'size_divisor']\n        else:\n            assert isinstance(crop_size, (list, tuple))\n            assert crop_size[0] > 0 and crop_size[1] > 0, (\n                'crop_size must > 0 in train mode')\n            assert isinstance(ratios, (list, tuple))\n            assert test_pad_mode is None, (\n                'test_pad_mode must be None in train mode')\n\n        self.crop_size = crop_size\n        self.ratios = ratios\n        self.border = border\n        # We do not set default value to mean, std and to_rgb because these\n        # hyper-parameters are easy to forget but could affect the performance.\n        # Please use the same setting as Normalize for performance assurance.\n        assert mean is not None and std is not None and to_rgb is not None\n        self.to_rgb = to_rgb\n        self.input_mean = mean\n        self.input_std = std\n        if to_rgb:\n            self.mean = mean[::-1]\n            self.std = std[::-1]\n        else:\n            self.mean = mean\n            self.std = std\n        self.test_mode = test_mode\n        self.test_pad_mode = test_pad_mode\n        self.test_pad_add_pix = test_pad_add_pix\n        self.bbox_clip_border = bbox_clip_border\n\n    def _get_border(self, border, size):\n        \"\"\"Get final border for the target size.\n\n        This function generates a ``final_border`` according to image's shape.\n        The area between ``final_border`` and ``size - final_border`` is the\n        ``center range``. 
We randomly choose center from the ``center range``\n        to avoid the random center being too close to the original image's\n        border. Also ``center range`` should be larger than 0.\n\n        Args:\n            border (int): The initial border, default is 128.\n            size (int): The width or height of original image.\n        Returns:\n            int: The final border.\n        \"\"\"\n        k = 2 * border / size\n        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))\n        return border // i\n\n    def _filter_boxes(self, patch, boxes):\n        \"\"\"Check whether the center of each box is in the patch.\n\n        Args:\n            patch (list[int]): The cropped area, [left, top, right, bottom].\n            boxes (numpy array, (N x 4)): Ground truth boxes.\n\n        Returns:\n            mask (numpy array, (N,)): Whether the center of each box is inside\n                the patch.\n        \"\"\"\n        center = boxes.centers.numpy()\n        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (\n            center[:, 0] < patch[2]) * (\n                center[:, 1] < patch[3])\n        return mask\n\n    def _crop_image_and_paste(self, image, center, size):\n        \"\"\"Crop image with a given center and size, then paste the cropped\n        image to a blank image with the two centers aligned.\n\n        This function is equivalent to generating a blank image with ``size``\n        as its shape. Then cover it on the original image with the two centers\n        (the center of the blank image and the random center of the original\n        image) aligned. The overlap area is pasted from the original image\n        and the outside area is filled with the ``mean`` pixel value.\n\n        Args:\n            image (np array, H x W x C): Original image.\n            center (list[int]): Target crop center coord.\n            size (list[int]): Target crop size. 
[target_h, target_w]\n\n        Returns:\n            cropped_img (np array, target_h x target_w x C): Cropped image.\n            border (np array, 4): The distance from the four borders of\n                ``cropped_img`` to the original image area, [top, bottom,\n                left, right].\n            patch (list[int]): The cropped area, [left, top, right, bottom].\n        \"\"\"\n        center_y, center_x = center\n        target_h, target_w = size\n        img_h, img_w, img_c = image.shape\n\n        x0 = max(0, center_x - target_w // 2)\n        x1 = min(center_x + target_w // 2, img_w)\n        y0 = max(0, center_y - target_h // 2)\n        y1 = min(center_y + target_h // 2, img_h)\n        patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n        left, right = center_x - x0, x1 - center_x\n        top, bottom = center_y - y0, y1 - center_y\n\n        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n        for i in range(img_c):\n            cropped_img[:, :, i] += self.mean[i]\n        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n        x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n        border = np.array([\n            cropped_center_y - top, cropped_center_y + bottom,\n            cropped_center_x - left, cropped_center_x + right\n        ],\n                          dtype=np.float32)\n\n        return cropped_img, border, patch\n\n    def _train_aug(self, results):\n        \"\"\"Random crop and padding around the original image.\n\n        Args:\n            results (dict): Image information in the augment pipeline.\n\n        Returns:\n            results (dict): The updated dict.\n        \"\"\"\n        img = results['img']\n        h, w, c = img.shape\n        gt_bboxes = results['gt_bboxes']\n        while True:\n            scale = random.choice(self.ratios)\n            new_h = int(self.crop_size[1] * scale)\n            new_w = int(self.crop_size[0] * scale)\n            h_border = self._get_border(self.border, h)\n            w_border = self._get_border(self.border, w)\n\n            for i in range(50):\n                center_x = random.randint(low=w_border, high=w - w_border)\n                center_y = random.randint(low=h_border, high=h - h_border)\n\n                cropped_img, border, patch = self._crop_image_and_paste(\n                    img, [center_y, center_x], [new_h, new_w])\n\n                if len(gt_bboxes) == 0:\n                    results['img'] = cropped_img\n                    results['img_shape'] = cropped_img.shape\n                    return results\n\n                # if the image does not have a valid bbox, any crop patch is\n                # valid.\n                mask = self._filter_boxes(patch, gt_bboxes)\n                if not mask.any():\n                    continue\n\n                results['img'] = cropped_img\n                results['img_shape'] = cropped_img.shape\n\n                x0, y0, x1, y1 = patch\n\n                left_w, top_h = center_x - x0, center_y - y0\n                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n                # crop bboxes accordingly and clip to the image boundary\n                gt_bboxes = gt_bboxes[mask]\n                gt_bboxes.translate_([\n                    cropped_center_x - left_w - x0,\n                    cropped_center_y - top_h - y0\n                ])\n        
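        # The translation maps original-image coordinates into the\n                # cropped patch's frame: the random center (x0 + left_w,\n                # y0 + top_h) lands on the patch center\n                # (cropped_center_x, cropped_center_y).\n        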
        if self.bbox_clip_border:\n                    gt_bboxes.clip_([new_h, new_w])\n                keep = gt_bboxes.is_inside([new_h, new_w]).numpy()\n                gt_bboxes = gt_bboxes[keep]\n\n                results['gt_bboxes'] = gt_bboxes\n\n                # ignore_flags\n                if results.get('gt_ignore_flags', None) is not None:\n                    gt_ignore_flags = results['gt_ignore_flags'][mask]\n                    results['gt_ignore_flags'] = \\\n                        gt_ignore_flags[keep]\n\n                # labels\n                if results.get('gt_bboxes_labels', None) is not None:\n                    gt_labels = results['gt_bboxes_labels'][mask]\n                    results['gt_bboxes_labels'] = gt_labels[keep]\n\n                if 'gt_masks' in results or 'gt_seg_map' in results:\n                    raise NotImplementedError(\n                        'RandomCenterCropPad only supports bbox.')\n\n                return results\n\n    def _test_aug(self, results):\n        \"\"\"Pad around the original image without cropping.\n\n        The padding mode and value are from ``test_pad_mode``.\n\n        Args:\n            results (dict): Image information in the augment pipeline.\n\n        Returns:\n            results (dict): The updated dict.\n        \"\"\"\n        img = results['img']\n        h, w, c = img.shape\n        if self.test_pad_mode[0] in ['logical_or']:\n            # self.test_pad_add_pix is only used for centernet\n            target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix\n            target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix\n        elif self.test_pad_mode[0] in ['size_divisor']:\n            divisor = self.test_pad_mode[1]\n            target_h = int(np.ceil(h / divisor)) * divisor\n            target_w = int(np.ceil(w / divisor)) * divisor\n        else:\n            raise NotImplementedError(\n                'RandomCenterCropPad only supports two testing pad modes: '\n                'logical_or and size_divisor.')\n\n        cropped_img, border, _ = self._crop_image_and_paste(\n            img, [h // 2, w // 2], [target_h, target_w])\n        results['img'] = cropped_img\n        results['img_shape'] = cropped_img.shape\n        results['border'] = border\n        return results\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        img = results['img']\n        assert img.dtype == np.float32, (\n            'RandomCenterCropPad needs the input image of dtype np.float32,'\n            ' please set \"to_float32=True\" in \"LoadImageFromFile\" pipeline')\n        h, w, c = img.shape\n        assert c == len(self.mean)\n        if self.test_mode:\n            return self._test_aug(results)\n        else:\n            return self._train_aug(results)\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(crop_size={self.crop_size}, '\n        repr_str += f'ratios={self.ratios}, '\n        repr_str += f'border={self.border}, '\n        repr_str += f'mean={self.input_mean}, '\n        repr_str += f'std={self.input_std}, '\n        repr_str += f'to_rgb={self.to_rgb}, '\n        repr_str += f'test_mode={self.test_mode}, '\n        repr_str += f'test_pad_mode={self.test_pad_mode}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass CutOut(BaseTransform):\n    \"\"\"CutOut operation.\n\n    Randomly drop some regions of image used in\n    `Cutout 
<https://arxiv.org/abs/1708.04552>`_.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        n_holes (int or tuple[int, int]): Number of regions to be dropped.\n            If it is given as a list, number of holes will be randomly\n            selected from the closed interval [``n_holes[0]``, ``n_holes[1]``].\n        cutout_shape (tuple[int, int] or list[tuple[int, int]], optional):\n            The candidate shape of dropped regions. It can be\n            ``tuple[int, int]`` to use a fixed cutout shape, or\n            ``list[tuple[int, int]]`` to randomly choose shape\n            from the list. Defaults to None.\n        cutout_ratio (tuple[float, float] or list[tuple[float, float]],\n            optional): The candidate ratio of dropped regions. It can be\n            ``tuple[float, float]`` to use a fixed ratio or\n            ``list[tuple[float, float]]`` to randomly choose ratio\n            from the list. Please note that ``cutout_shape`` and\n            ``cutout_ratio`` cannot be both given at the same time.\n            Defaults to None.\n        fill_in (tuple[float, float, float] or tuple[int, int, int]): The value\n            of pixel to fill in the dropped regions. Defaults to (0, 0, 0).\n    \"\"\"\n\n    def __init__(\n        self,\n        n_holes: Union[int, Tuple[int, int]],\n        cutout_shape: Optional[Union[Tuple[int, int],\n                                     List[Tuple[int, int]]]] = None,\n        cutout_ratio: Optional[Union[Tuple[float, float],\n                                     List[Tuple[float, float]]]] = None,\n        fill_in: Union[Tuple[float, float, float], Tuple[int, int,\n                                                         int]] = (0, 0, 0)\n    ) -> None:\n\n        assert (cutout_shape is None) ^ (cutout_ratio is None), \\\n            'Either cutout_shape or cutout_ratio should be specified.'\n        assert (isinstance(cutout_shape, (list, tuple))\n                or isinstance(cutout_ratio, (list, tuple)))\n        if isinstance(n_holes, tuple):\n            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]\n        else:\n            n_holes = (n_holes, n_holes)\n        self.n_holes = n_holes\n        self.fill_in = fill_in\n        self.with_ratio = cutout_ratio is not None\n        self.candidates = cutout_ratio if self.with_ratio else cutout_shape\n        if not isinstance(self.candidates, list):\n            self.candidates = [self.candidates]\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Call function to drop some regions of image.\"\"\"\n        h, w, c = results['img'].shape\n        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)\n        for _ in range(n_holes):\n            x1 = np.random.randint(0, w)\n            y1 = np.random.randint(0, h)\n            index = np.random.randint(0, len(self.candidates))\n            if not self.with_ratio:\n                cutout_w, cutout_h = self.candidates[index]\n            else:\n                cutout_w = int(self.candidates[index][0] * w)\n                cutout_h = int(self.candidates[index][1] * h)\n\n            x2 = np.clip(x1 + cutout_w, 0, w)\n            y2 = np.clip(y1 + cutout_h, 0, h)\n            results['img'][y1:y2, x1:x2, :] = self.fill_in\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(n_holes={self.n_holes}, '\n        repr_str += (f'cutout_ratio={self.candidates}, ' if 
self.with_ratio\n                     else f'cutout_shape={self.candidates}, ')\n        repr_str += f'fill_in={self.fill_in})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass Mosaic(BaseTransform):\n    \"\"\"Mosaic augmentation.\n\n    Given 4 images, mosaic transform combines them into\n    one output image. The output image is composed of the parts from each sub-\n    image.\n\n    .. code:: text\n\n                        mosaic transform\n                           center_x\n                +------------------------------+\n                |       pad        |  pad      |\n                |      +-----------+           |\n                |      |           |           |\n                |      |  image1   |--------+  |\n                |      |           |        |  |\n                |      |           | image2 |  |\n     center_y   |----+-------------+-----------|\n                |    |   cropped   |           |\n                |pad |   image3    |  image4   |\n                |    |             |           |\n                +----|-------------+-----------+\n                     |             |\n                     +-------------+\n\n     The mosaic transform steps are as follows:\n\n         1. Choose the mosaic center as the intersections of 4 images\n         2. Get the left top image according to the index, and randomly\n            sample another 3 images from the custom dataset.\n         3. Sub image will be cropped if image is larger than mosaic patch\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - mix_results (List[dict])\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n\n    Args:\n        img_scale (Sequence[int]): Image size after mosaic pipeline of single\n            image. The shape order should be (width, height).\n            Defaults to (640, 640).\n        center_ratio_range (Sequence[float]): Center ratio range of mosaic\n            output. Defaults to (0.5, 1.5).\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        pad_val (int): Pad value. Defaults to 114.\n        prob (float): Probability of applying this transformation.\n            Defaults to 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale: Tuple[int, int] = (640, 640),\n                 center_ratio_range: Tuple[float, float] = (0.5, 1.5),\n                 bbox_clip_border: bool = True,\n                 pad_val: float = 114.0,\n                 prob: float = 1.0) -> None:\n        assert isinstance(img_scale, tuple)\n        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. 
' \\\n                                 f'Got {prob}.'\n\n        log_img_scale(img_scale, skip_square=True, shape_order='wh')\n        self.img_scale = img_scale\n        self.center_ratio_range = center_ratio_range\n        self.bbox_clip_border = bbox_clip_border\n        self.pad_val = pad_val\n        self.prob = prob\n\n    @cache_randomness\n    def get_indexes(self, dataset: BaseDataset) -> List[int]:\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n\n        Returns:\n            list: indexes.\n        \"\"\"\n\n        indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n        return indexes\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Mosaic transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        if random.uniform(0, 1) > self.prob:\n            return results\n\n        assert 'mix_results' in results\n        mosaic_bboxes = []\n        mosaic_bboxes_labels = []\n        mosaic_ignore_flags = []\n        if len(results['img'].shape) == 3:\n            mosaic_img = np.full(\n                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n                self.pad_val,\n                dtype=results['img'].dtype)\n        else:\n            mosaic_img = np.full(\n                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n                self.pad_val,\n                dtype=results['img'].dtype)\n\n        # mosaic center x, y\n        center_x = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[0])\n        center_y = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[1])\n        center_position = (center_x, center_y)\n\n        loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n        for i, loc in enumerate(loc_strs):\n            if loc == 'top_left':\n                results_patch = copy.deepcopy(results)\n            else:\n                results_patch = copy.deepcopy(results['mix_results'][i - 1])\n\n            img_i = results_patch['img']\n            h_i, w_i = img_i.shape[:2]\n            # keep_ratio resize\n            scale_ratio_i = min(self.img_scale[1] / h_i,\n                                self.img_scale[0] / w_i)\n            img_i = mmcv.imresize(\n                img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n            # compute the combine parameters\n            paste_coord, crop_coord = self._mosaic_combine(\n                loc, center_position, img_i.shape[:2][::-1])\n            x1_p, y1_p, x2_p, y2_p = paste_coord\n            x1_c, y1_c, x2_c, y2_c = crop_coord\n\n            # crop and paste image\n            mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n            # adjust coordinate\n            gt_bboxes_i = results_patch['gt_bboxes']\n            gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n            gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n            padw = x1_p - x1_c\n            padh = y1_p - y1_c\n            gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n            gt_bboxes_i.translate_([padw, padh])\n            mosaic_bboxes.append(gt_bboxes_i)\n            mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n            mosaic_ignore_flags.append(gt_ignore_flags_i)\n\n        mosaic_bboxes = 
mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n        mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n        mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n        if self.bbox_clip_border:\n            mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n        # remove outside bboxes\n        inside_inds = mosaic_bboxes.is_inside(\n            [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n        mosaic_bboxes = mosaic_bboxes[inside_inds]\n        mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n        mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n        results['img'] = mosaic_img\n        results['img_shape'] = mosaic_img.shape\n        results['gt_bboxes'] = mosaic_bboxes\n        results['gt_bboxes_labels'] = mosaic_bboxes_labels\n        results['gt_ignore_flags'] = mosaic_ignore_flags\n        return results\n\n    def _mosaic_combine(\n            self, loc: str, center_position_xy: Sequence[float],\n            img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]:\n        \"\"\"Calculate global coordinate of mosaic image and local coordinate of\n        cropped sub-image.\n\n        Args:\n            loc (str): Index for the sub-image, loc in ('top_left',\n              'top_right', 'bottom_left', 'bottom_right').\n            center_position_xy (Sequence[float]): Mixing center for 4 images,\n                (x, y).\n            img_shape_wh (Sequence[int]): Width and height of sub-image\n\n        Returns:\n            tuple[tuple[float]]: Corresponding coordinate of pasting and\n                cropping\n                - paste_coord (tuple): paste corner coordinate in mosaic image.\n                - crop_coord (tuple): crop corner coordinate in mosaic image.\n        \"\"\"\n        assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n        if loc == 'top_left':\n            # index0 to top left part of image\n            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n                             max(center_position_xy[1] - img_shape_wh[1], 0), \\\n                             center_position_xy[0], \\\n                             center_position_xy[1]\n            crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (\n                y2 - y1), img_shape_wh[0], img_shape_wh[1]\n\n        elif loc == 'top_right':\n            # index1 to top right part of image\n            x1, y1, x2, y2 = center_position_xy[0], \\\n                             max(center_position_xy[1] - img_shape_wh[1], 0), \\\n                             min(center_position_xy[0] + img_shape_wh[0],\n                                 self.img_scale[0] * 2), \\\n                             center_position_xy[1]\n            crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(\n                img_shape_wh[0], x2 - x1), img_shape_wh[1]\n\n        elif loc == 'bottom_left':\n            # index2 to bottom left part of image\n            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n                             center_position_xy[1], \\\n                             center_position_xy[0], \\\n                             min(self.img_scale[1] * 2, center_position_xy[1] +\n                                 img_shape_wh[1])\n            crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(\n                y2 - y1, img_shape_wh[1])\n\n        else:\n            # index3 to bottom right part of image\n            x1, y1, x2, y2 = 
center_position_xy[0], \\\n                             center_position_xy[1], \\\n                             min(center_position_xy[0] + img_shape_wh[0],\n                                 self.img_scale[0] * 2), \\\n                             min(self.img_scale[1] * 2, center_position_xy[1] +\n                                 img_shape_wh[1])\n            crop_coord = 0, 0, min(img_shape_wh[0],\n                                   x2 - x1), min(y2 - y1, img_shape_wh[1])\n\n        paste_coord = x1, y1, x2, y2\n        return paste_coord, crop_coord\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(img_scale={self.img_scale}, '\n        repr_str += f'center_ratio_range={self.center_ratio_range}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass MixUp(BaseTransform):\n    \"\"\"MixUp data augmentation.\n\n    .. code:: text\n\n                         mixup transform\n                +------------------------------+\n                | mixup image   |              |\n                |      +--------|--------+     |\n                |      |        |        |     |\n                |---------------+        |     |\n                |      |                 |     |\n                |      |      image      |     |\n                |      |                 |     |\n                |      |                 |     |\n                |      |-----------------+     |\n                |             pad              |\n                +------------------------------+\n\n     The mixup transform steps are as follows:\n\n        1. Another random image is picked by dataset and embedded in\n           the top left patch(after padding and resizing)\n        2. The target of mixup transform is the weighted average of mixup\n           image and origin image.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - mix_results (List[dict])\n\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n\n\n    Args:\n        img_scale (Sequence[int]): Image output size after mixup pipeline.\n            The shape order should be (width, height). Defaults to (640, 640).\n        ratio_range (Sequence[float]): Scale ratio of mixup image.\n            Defaults to (0.5, 1.5).\n        flip_ratio (float): Horizontal flip ratio of mixup image.\n            Defaults to 0.5.\n        pad_val (int): Pad value. Defaults to 114.\n        max_iters (int): The maximum number of iterations. If the number of\n            iterations is greater than `max_iters`, but gt_bbox is still\n            empty, then the iteration is terminated. Defaults to 15.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. 
Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale: Tuple[int, int] = (640, 640),\n                 ratio_range: Tuple[float, float] = (0.5, 1.5),\n                 flip_ratio: float = 0.5,\n                 pad_val: float = 114.0,\n                 max_iters: int = 15,\n                 bbox_clip_border: bool = True) -> None:\n        assert isinstance(img_scale, tuple)\n        log_img_scale(img_scale, skip_square=True, shape_order='wh')\n        self.dynamic_scale = img_scale\n        self.ratio_range = ratio_range\n        self.flip_ratio = flip_ratio\n        self.pad_val = pad_val\n        self.max_iters = max_iters\n        self.bbox_clip_border = bbox_clip_border\n\n    @cache_randomness\n    def get_indexes(self, dataset: BaseDataset) -> int:\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n\n        Returns:\n            list: indexes.\n        \"\"\"\n\n        for i in range(self.max_iters):\n            index = random.randint(0, len(dataset))\n            gt_bboxes_i = dataset[index]['gt_bboxes']\n            if len(gt_bboxes_i) != 0:\n                break\n\n        return index\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"MixUp transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n\n        assert 'mix_results' in results\n        assert len(\n            results['mix_results']) == 1, 'MixUp only support 2 images now !'\n\n        if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:\n            # empty bbox\n            return results\n\n        retrieve_results = results['mix_results'][0]\n        retrieve_img = retrieve_results['img']\n\n        jit_factor = random.uniform(*self.ratio_range)\n        is_filp = random.uniform(0, 1) > self.flip_ratio\n\n        if len(retrieve_img.shape) == 3:\n            out_img = np.ones(\n                (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n                dtype=retrieve_img.dtype) * self.pad_val\n        else:\n            out_img = np.ones(\n                self.dynamic_scale[::-1],\n                dtype=retrieve_img.dtype) * self.pad_val\n\n        # 1. keep_ratio resize\n        scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n                          self.dynamic_scale[0] / retrieve_img.shape[1])\n        retrieve_img = mmcv.imresize(\n            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n                           int(retrieve_img.shape[0] * scale_ratio)))\n\n        # 2. paste\n        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n        # 3. scale jit\n        scale_ratio *= jit_factor\n        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n                                          int(out_img.shape[0] * jit_factor)))\n\n        # 4. flip\n        if is_filp:\n            out_img = out_img[:, ::-1, :]\n\n        # 5. 
random crop\n        ori_img = results['img']\n        origin_h, origin_w = out_img.shape[:2]\n        target_h, target_w = ori_img.shape[:2]\n        padded_img = np.ones((max(origin_h, target_h), max(\n            origin_w, target_w), 3)) * self.pad_val\n        padded_img = padded_img.astype(np.uint8)\n        padded_img[:origin_h, :origin_w] = out_img\n\n        x_offset, y_offset = 0, 0\n        if padded_img.shape[0] > target_h:\n            y_offset = random.randint(0, padded_img.shape[0] - target_h)\n        if padded_img.shape[1] > target_w:\n            x_offset = random.randint(0, padded_img.shape[1] - target_w)\n        padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n                                        x_offset:x_offset + target_w]\n\n        # 6. adjust bbox\n        retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n        retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n        if self.bbox_clip_border:\n            retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n        if is_filp:\n            retrieve_gt_bboxes.flip_([origin_h, origin_w],\n                                     direction='horizontal')\n\n        # 7. filter\n        cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n        cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n        if self.bbox_clip_border:\n            cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n        # 8. mix up\n        ori_img = ori_img.astype(np.float32)\n        mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n        retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n        retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n        mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n            (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n        mixup_gt_bboxes_labels = np.concatenate(\n            (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n        mixup_gt_ignore_flags = np.concatenate(\n            (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n\n        # remove outside bbox\n        inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n        mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n        mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n        mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n\n        results['img'] = mixup_img.astype(np.uint8)\n        results['img_shape'] = mixup_img.shape\n        results['gt_bboxes'] = mixup_gt_bboxes\n        results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n        results['gt_ignore_flags'] = mixup_gt_ignore_flags\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'flip_ratio={self.flip_ratio}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'max_iters={self.max_iters}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass RandomAffine(BaseTransform):\n    \"\"\"Random affine transform data augmentation.\n\n    This operation randomly generates affine transform matrix which including\n    rotation, translation, shear and scaling transforms.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags 
(bool) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n\n    Args:\n        max_rotate_degree (float): Maximum degrees of rotation transform.\n            Defaults to 10.\n        max_translate_ratio (float): Maximum ratio of translation.\n            Defaults to 0.1.\n        scaling_ratio_range (tuple[float]): Min and max ratio of\n            scaling transform. Defaults to (0.5, 1.5).\n        max_shear_degree (float): Maximum degrees of shear\n            transform. Defaults to 2.\n        border (tuple[int]): Distance from width and height sides of input\n            image to adjust output shape. Only used in mosaic dataset.\n            Defaults to (0, 0).\n        border_val (tuple[int]): Border padding values of 3 channels.\n            Defaults to (114, 114, 114).\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 max_rotate_degree: float = 10.0,\n                 max_translate_ratio: float = 0.1,\n                 scaling_ratio_range: Tuple[float, float] = (0.5, 1.5),\n                 max_shear_degree: float = 2.0,\n                 border: Tuple[int, int] = (0, 0),\n                 border_val: Tuple[int, int, int] = (114, 114, 114),\n                 bbox_clip_border: bool = True) -> None:\n        assert 0 <= max_translate_ratio <= 1\n        assert scaling_ratio_range[0] <= scaling_ratio_range[1]\n        assert scaling_ratio_range[0] > 0\n        self.max_rotate_degree = max_rotate_degree\n        self.max_translate_ratio = max_translate_ratio\n        self.scaling_ratio_range = scaling_ratio_range\n        self.max_shear_degree = max_shear_degree\n        self.border = border\n        self.border_val = border_val\n        self.bbox_clip_border = bbox_clip_border\n\n    @cache_randomness\n    def _get_random_homography_matrix(self, height, width):\n        # Rotation\n        rotation_degree = random.uniform(-self.max_rotate_degree,\n                                         self.max_rotate_degree)\n        rotation_matrix = self._get_rotation_matrix(rotation_degree)\n\n        # Scaling\n        scaling_ratio = random.uniform(self.scaling_ratio_range[0],\n                                       self.scaling_ratio_range[1])\n        scaling_matrix = self._get_scaling_matrix(scaling_ratio)\n\n        # Shear\n        x_degree = random.uniform(-self.max_shear_degree,\n                                  self.max_shear_degree)\n        y_degree = random.uniform(-self.max_shear_degree,\n                                  self.max_shear_degree)\n        shear_matrix = self._get_shear_matrix(x_degree, y_degree)\n\n        # Translation\n        trans_x = random.uniform(-self.max_translate_ratio,\n                                 self.max_translate_ratio) * width\n        trans_y = random.uniform(-self.max_translate_ratio,\n                                 self.max_translate_ratio) * height\n        translate_matrix = self._get_translation_matrix(trans_x, trans_y)\n\n        warp_matrix = (\n            translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix)\n        return warp_matrix\n\n    @autocast_box_type()\n    def transform(self, results: dict) 
-> dict:\n        img = results['img']\n        height = img.shape[0] + self.border[1] * 2\n        width = img.shape[1] + self.border[0] * 2\n\n        warp_matrix = self._get_random_homography_matrix(height, width)\n\n        img = cv2.warpPerspective(\n            img,\n            warp_matrix,\n            dsize=(width, height),\n            borderValue=self.border_val)\n        results['img'] = img\n        results['img_shape'] = img.shape\n\n        bboxes = results['gt_bboxes']\n        num_bboxes = len(bboxes)\n        if num_bboxes:\n            bboxes.project_(warp_matrix)\n            if self.bbox_clip_border:\n                bboxes.clip_([height, width])\n            # remove outside bbox\n            valid_index = bboxes.is_inside([height, width]).numpy()\n            results['gt_bboxes'] = bboxes[valid_index]\n            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][\n                valid_index]\n            results['gt_ignore_flags'] = results['gt_ignore_flags'][\n                valid_index]\n\n            if 'gt_masks' in results:\n                raise NotImplementedError('RandomAffine only supports bbox.')\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '\n        repr_str += f'max_translate_ratio={self.max_translate_ratio}, '\n        repr_str += f'scaling_ratio_range={self.scaling_ratio_range}, '\n        repr_str += f'max_shear_degree={self.max_shear_degree}, '\n        repr_str += f'border={self.border}, '\n        repr_str += f'border_val={self.border_val}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n    @staticmethod\n    def _get_rotation_matrix(rotate_degrees: float) -> np.ndarray:\n        radian = math.radians(rotate_degrees)\n        rotation_matrix = np.array(\n            [[np.cos(radian), -np.sin(radian), 0.],\n             [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],\n            dtype=np.float32)\n        return rotation_matrix\n\n    @staticmethod\n    def _get_scaling_matrix(scale_ratio: float) -> np.ndarray:\n        scaling_matrix = np.array(\n            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],\n            dtype=np.float32)\n        return scaling_matrix\n\n    @staticmethod\n    def _get_shear_matrix(x_shear_degrees: float,\n                          y_shear_degrees: float) -> np.ndarray:\n        x_radian = math.radians(x_shear_degrees)\n        y_radian = math.radians(y_shear_degrees)\n        shear_matrix = np.array([[1, np.tan(x_radian), 0.],\n                                 [np.tan(y_radian), 1, 0.], [0., 0., 1.]],\n                                dtype=np.float32)\n        return shear_matrix\n\n    @staticmethod\n    def _get_translation_matrix(x: float, y: float) -> np.ndarray:\n        translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],\n                                      dtype=np.float32)\n        return translation_matrix\n\n\n@TRANSFORMS.register_module()\nclass YOLOXHSVRandomAug(BaseTransform):\n    \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n    https://github.com/Megvii-\n    BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n    Required Keys:\n\n    - img\n\n    Modified Keys:\n\n    - img\n\n    Args:\n        hue_delta (int): delta of hue. Defaults to 5.\n        saturation_delta (int): delta of saturation. 
Defaults to 30.\n        value_delta (int): delta of value. Defaults to 30.\n    \"\"\"\n\n    def __init__(self,\n                 hue_delta: int = 5,\n                 saturation_delta: int = 30,\n                 value_delta: int = 30) -> None:\n        self.hue_delta = hue_delta\n        self.saturation_delta = saturation_delta\n        self.value_delta = value_delta\n\n    @cache_randomness\n    def _get_hsv_gains(self):\n        hsv_gains = np.random.uniform(-1, 1, 3) * [\n            self.hue_delta, self.saturation_delta, self.value_delta\n        ]\n        # random selection of h, s, v\n        hsv_gains *= np.random.randint(0, 2, 3)\n        # prevent overflow\n        hsv_gains = hsv_gains.astype(np.int16)\n        return hsv_gains\n\n    def transform(self, results: dict) -> dict:\n        img = results['img']\n        hsv_gains = self._get_hsv_gains()\n        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n        img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n        img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n        img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n        cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n        results['img'] = img\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(hue_delta={self.hue_delta}, '\n        repr_str += f'saturation_delta={self.saturation_delta}, '\n        repr_str += f'value_delta={self.value_delta})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass CopyPaste(BaseTransform):\n    \"\"\"Simple Copy-Paste is a Strong Data Augmentation Method for Instance\n    Segmentation. The simple copy-paste transform steps are as follows:\n\n    1. The destination image is already resized with aspect ratio kept,\n       cropped and padded.\n    2. Randomly select a source image, which is also already resized\n       with aspect ratio kept, cropped and padded in a similar way\n       as the destination image.\n    3. Randomly select some objects from the source image.\n    4. Paste these source objects to the destination image directly,\n       because the source and destination images have the same size.\n    5. Update object masks of the destination image, since some original\n       objects may be occluded.\n    6. Generate bboxes from the updated destination masks and\n       filter some objects which are totally occluded, and adjust bboxes\n       which are partly occluded.\n    7. Append selected source bboxes, masks, and labels.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_masks (BitmapMasks) (optional)\n\n    Modified Keys:\n\n    - img\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n    - gt_masks (optional)\n\n    Args:\n        max_num_pasted (int): The maximum number of pasted objects.\n            Defaults to 100.\n        bbox_occluded_thr (int): The threshold of occluded bbox.\n            Defaults to 10.\n        mask_occluded_thr (int): The threshold of occluded mask.\n            Defaults to 300.\n        selected (bool): Whether to select objects or not. 
If select is False,\n            all objects of the source image will be pasted to the\n            destination image.\n            Defaults to True.\n    \"\"\"\n\n    def __init__(\n        self,\n        max_num_pasted: int = 100,\n        bbox_occluded_thr: int = 10,\n        mask_occluded_thr: int = 300,\n        selected: bool = True,\n    ) -> None:\n        self.max_num_pasted = max_num_pasted\n        self.bbox_occluded_thr = bbox_occluded_thr\n        self.mask_occluded_thr = mask_occluded_thr\n        self.selected = selected\n\n    @cache_randomness\n    def get_indexes(self, dataset: BaseDataset) -> int:\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n        Returns:\n            list: Indexes.\n        \"\"\"\n        return random.randint(0, len(dataset))\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to make a copy-paste of image.\n\n        Args:\n            results (dict): Result dict.\n        Returns:\n            dict: Result dict with copy-paste transformed.\n        \"\"\"\n\n        assert 'mix_results' in results\n        num_images = len(results['mix_results'])\n        assert num_images == 1, \\\n            f'CopyPaste only supports processing 2 images, got {num_images}'\n        if self.selected:\n            selected_results = self._select_object(results['mix_results'][0])\n        else:\n            selected_results = results['mix_results'][0]\n        return self._copy_paste(results, selected_results)\n\n    @cache_randomness\n    def _get_selected_inds(self, num_bboxes: int) -> np.ndarray:\n        max_num_pasted = min(num_bboxes + 1, self.max_num_pasted)\n        num_pasted = np.random.randint(0, max_num_pasted)\n        return np.random.choice(num_bboxes, size=num_pasted, replace=False)\n\n    def _select_object(self, results: dict) -> dict:\n        \"\"\"Select some objects from the source results.\"\"\"\n        bboxes = results['gt_bboxes']\n        labels = results['gt_bboxes_labels']\n        masks = results['gt_masks']\n        ignore_flags = results['gt_ignore_flags']\n\n        selected_inds = self._get_selected_inds(bboxes.shape[0])\n\n        selected_bboxes = bboxes[selected_inds]\n        selected_labels = labels[selected_inds]\n        selected_masks = masks[selected_inds]\n        selected_ignore_flags = ignore_flags[selected_inds]\n\n        results['gt_bboxes'] = selected_bboxes\n        results['gt_bboxes_labels'] = selected_labels\n        results['gt_masks'] = selected_masks\n        results['gt_ignore_flags'] = selected_ignore_flags\n        return results\n\n    def _copy_paste(self, dst_results: dict, src_results: dict) -> dict:\n        \"\"\"CopyPaste transform function.\n\n        Args:\n            dst_results (dict): Result dict of the destination image.\n            src_results (dict): Result dict of the source image.\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        dst_img = dst_results['img']\n        dst_bboxes = dst_results['gt_bboxes']\n        dst_labels = dst_results['gt_bboxes_labels']\n        dst_masks = dst_results['gt_masks']\n        dst_ignore_flags = dst_results['gt_ignore_flags']\n\n        src_img = src_results['img']\n        src_bboxes = src_results['gt_bboxes']\n        src_labels = src_results['gt_bboxes_labels']\n        src_masks = src_results['gt_masks']\n        src_ignore_flags = src_results['gt_ignore_flags']\n\n      
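  # With no source instances there is nothing to paste, so the\n        # destination results are returned unchanged.\n      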
  if len(src_bboxes) == 0:\n            return dst_results\n\n        # update masks and generate bboxes from updated masks\n        composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0)\n        updated_dst_masks = self._get_updated_masks(dst_masks, composed_mask)\n        updated_dst_bboxes = updated_dst_masks.get_bboxes(type(dst_bboxes))\n        assert len(updated_dst_bboxes) == len(updated_dst_masks)\n\n        # filter totally occluded objects\n        l1_distance = (updated_dst_bboxes.tensor - dst_bboxes.tensor).abs()\n        bboxes_inds = (l1_distance <= self.bbox_occluded_thr).all(\n            dim=-1).numpy()\n        masks_inds = updated_dst_masks.masks.sum(\n            axis=(1, 2)) > self.mask_occluded_thr\n        valid_inds = bboxes_inds | masks_inds\n\n        # Paste source objects to destination image directly\n        img = dst_img * (1 - composed_mask[..., np.newaxis]\n                         ) + src_img * composed_mask[..., np.newaxis]\n        bboxes = src_bboxes.cat([updated_dst_bboxes[valid_inds], src_bboxes])\n        labels = np.concatenate([dst_labels[valid_inds], src_labels])\n        masks = np.concatenate(\n            [updated_dst_masks.masks[valid_inds], src_masks.masks])\n        ignore_flags = np.concatenate(\n            [dst_ignore_flags[valid_inds], src_ignore_flags])\n\n        dst_results['img'] = img\n        dst_results['gt_bboxes'] = bboxes\n        dst_results['gt_bboxes_labels'] = labels\n        dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1],\n                                              masks.shape[2])\n        dst_results['gt_ignore_flags'] = ignore_flags\n\n        return dst_results\n\n    def _get_updated_masks(self, masks: BitmapMasks,\n                           composed_mask: np.ndarray) -> BitmapMasks:\n        \"\"\"Update masks with composed mask.\"\"\"\n        assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \\\n            'Cannot compare two arrays of different size'\n        masks.masks = np.where(composed_mask, 0, masks.masks)\n        return masks\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(max_num_pasted={self.max_num_pasted}, '\n        repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, '\n        repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, '\n        repr_str += f'selected={self.selected})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass RandomErasing(BaseTransform):\n    \"\"\"RandomErasing operation.\n\n    Random Erasing randomly selects a rectangle region\n    in an image and erases its pixels with random values.\n    `RandomErasing <https://arxiv.org/abs/1708.04896>`_.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (HorizontalBoxes[torch.float32]) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - gt_masks (BitmapMasks) (optional)\n\n    Modified Keys:\n    - img\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n    - gt_masks (optional)\n\n    Args:\n        n_patches (int or tuple[int, int]): Number of regions to be dropped.\n            If it is given as a tuple, number of patches will be randomly\n            selected from the closed interval [``n_patches[0]``,\n            ``n_patches[1]``].\n        ratio (float or tuple[float, float]): The ratio of erased regions.\n            It can be ``float`` to use a fixed ratio or ``tuple[float, float]``\n            to randomly choose ratio 
from the interval.\n        squared (bool): Whether to erase square region. Defaults to True.\n        bbox_erased_thr (float): The threshold for the maximum area proportion\n            of the bbox to be erased. When the proportion of the area where the\n            bbox is erased is greater than the threshold, the bbox will be\n            removed. Defaults to 0.9.\n        img_border_value (int or float or tuple): The filled values for\n            image border. If float, the same fill value will be used for\n            all the three channels of image. If tuple, it should be 3 elements.\n            Defaults to 128.\n        mask_border_value (int): The fill value used for masks. Defaults to 0.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Defaults to 255.\n    \"\"\"\n\n    def __init__(\n        self,\n        n_patches: Union[int, Tuple[int, int]],\n        ratio: Union[float, Tuple[float, float]],\n        squared: bool = True,\n        bbox_erased_thr: float = 0.9,\n        img_border_value: Union[int, float, tuple] = 128,\n        mask_border_value: int = 0,\n        seg_ignore_label: int = 255,\n    ) -> None:\n        if isinstance(n_patches, tuple):\n            assert len(n_patches) == 2 and 0 <= n_patches[0] < n_patches[1]\n        else:\n            n_patches = (n_patches, n_patches)\n        if isinstance(ratio, tuple):\n            assert len(ratio) == 2 and 0 <= ratio[0] < ratio[1] <= 1\n        else:\n            ratio = (ratio, ratio)\n\n        self.n_patches = n_patches\n        self.ratio = ratio\n        self.squared = squared\n        self.bbox_erased_thr = bbox_erased_thr\n        self.img_border_value = img_border_value\n        self.mask_border_value = mask_border_value\n        self.seg_ignore_label = seg_ignore_label\n\n    @cache_randomness\n    def _get_patches(self, img_shape: Tuple[int, int]) -> List[list]:\n        \"\"\"Get patches for random erasing.\"\"\"\n        patches = []\n        n_patches = np.random.randint(self.n_patches[0], self.n_patches[1] + 1)\n        for _ in range(n_patches):\n            if self.squared:\n                ratio = np.random.random() * (self.ratio[1] -\n                                              self.ratio[0]) + self.ratio[0]\n                ratio = (ratio, ratio)\n            else:\n                ratio = (np.random.random() * (self.ratio[1] - self.ratio[0]) +\n                         self.ratio[0], np.random.random() *\n                         (self.ratio[1] - self.ratio[0]) + self.ratio[0])\n            ph, pw = int(img_shape[0] * ratio[0]), int(img_shape[1] * ratio[1])\n            px1, py1 = np.random.randint(0,\n                                         img_shape[1] - pw), np.random.randint(\n                                             0, img_shape[0] - ph)\n            px2, py2 = px1 + pw, py1 + ph\n            patches.append([px1, py1, px2, py2])\n        return np.array(patches)\n\n    def _transform_img(self, results: dict, patches: List[list]) -> None:\n        \"\"\"Random erasing the image.\"\"\"\n        for patch in patches:\n            px1, py1, px2, py2 = patch\n            results['img'][py1:py2, px1:px2, :] = self.img_border_value\n\n    def _transform_bboxes(self, results: dict, patches: List[list]) -> None:\n        \"\"\"Random erasing the bboxes.\"\"\"\n        bboxes = results['gt_bboxes']\n        # TODO: unify the logic by using operators 
in BaseBoxes.\n        assert isinstance(bboxes, HorizontalBoxes)\n        bboxes = bboxes.numpy()\n        left_top = np.maximum(bboxes[:, None, :2], patches[:, :2])\n        right_bottom = np.minimum(bboxes[:, None, 2:], patches[:, 2:])\n        wh = np.maximum(right_bottom - left_top, 0)\n        inter_areas = wh[:, :, 0] * wh[:, :, 1]\n        bbox_areas = (bboxes[:, 2] - bboxes[:, 0]) * (\n            bboxes[:, 3] - bboxes[:, 1])\n        bboxes_erased_ratio = inter_areas.sum(-1) / (bbox_areas + 1e-7)\n        valid_inds = bboxes_erased_ratio < self.bbox_erased_thr\n        results['gt_bboxes'] = HorizontalBoxes(bboxes[valid_inds])\n        results['gt_bboxes_labels'] = results['gt_bboxes_labels'][valid_inds]\n        results['gt_ignore_flags'] = results['gt_ignore_flags'][valid_inds]\n        if results.get('gt_masks', None) is not None:\n            results['gt_masks'] = results['gt_masks'][valid_inds]\n\n    def _transform_masks(self, results: dict, patches: List[list]) -> None:\n        \"\"\"Random erasing the masks.\"\"\"\n        for patch in patches:\n            px1, py1, px2, py2 = patch\n            results['gt_masks'].masks[:, py1:py2,\n                                      px1:px2] = self.mask_border_value\n\n    def _transform_seg(self, results: dict, patches: List[list]) -> None:\n        \"\"\"Random erasing the segmentation map.\"\"\"\n        for patch in patches:\n            px1, py1, px2, py2 = patch\n            results['gt_seg_map'][py1:py2, px1:px2] = self.seg_ignore_label\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to erase some regions of image.\"\"\"\n        patches = self._get_patches(results['img_shape'])\n        self._transform_img(results, patches)\n        if results.get('gt_bboxes', None) is not None:\n            self._transform_bboxes(results, patches)\n        if results.get('gt_masks', None) is not None:\n            self._transform_masks(results, patches)\n        if results.get('gt_seg_map', None) is not None:\n            self._transform_seg(results, patches)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(n_patches={self.n_patches}, '\n        repr_str += f'ratio={self.ratio}, '\n        repr_str += f'squared={self.squared}, '\n        repr_str += f'bbox_erased_thr={self.bbox_erased_thr}, '\n        repr_str += f'img_border_value={self.img_border_value}, '\n        repr_str += f'mask_border_value={self.mask_border_value}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass CachedMosaic(Mosaic):\n    \"\"\"Cached mosaic augmentation.\n\n    Cached mosaic transform will random select images from the cache\n    and combine them into one output image.\n\n    .. 
code:: text\n\n                        mosaic transform\n                           center_x\n                +------------------------------+\n                |       pad        |  pad      |\n                |      +-----------+           |\n                |      |           |           |\n                |      |  image1   |--------+  |\n                |      |           |        |  |\n                |      |           | image2 |  |\n     center_y   |----+-------------+-----------|\n                |    |   cropped   |           |\n                |pad |   image3    |  image4   |\n                |    |             |           |\n                +----|-------------+-----------+\n                     |             |\n                     +-------------+\n\n     The cached mosaic transform steps are as follows:\n\n         1. Append the results from the last transform into the cache.\n         2. Choose the mosaic center as the intersections of 4 images\n         3. Get the left top image according to the index, and randomly\n            sample another 3 images from the result cache.\n         4. Sub image will be cropped if image is larger than mosaic patch\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (np.float32) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n\n    Args:\n        img_scale (Sequence[int]): Image size after mosaic pipeline of single\n            image. The shape order should be (width, height).\n            Defaults to (640, 640).\n        center_ratio_range (Sequence[float]): Center ratio range of mosaic\n            output. Defaults to (0.5, 1.5).\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        pad_val (int): Pad value. Defaults to 114.\n        prob (float): Probability of applying this transformation.\n            Defaults to 1.0.\n        max_cached_images (int): The maximum length of the cache. The larger\n            the cache, the stronger the randomness of this transform. As a\n            rule of thumb, providing 10 caches for each image suffices for\n            randomness. Defaults to 40.\n        random_pop (bool): Whether to randomly pop a result from the cache\n            when the cache is full. 
If set to False, use FIFO popping method.\n            Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 max_cached_images: int = 40,\n                 random_pop: bool = True,\n                 **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.results_cache = []\n        self.random_pop = random_pop\n        assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n                                       f'but got {max_cached_images}.'\n        self.max_cached_images = max_cached_images\n\n    @cache_randomness\n    def get_indexes(self, cache: list) -> list:\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            cache (list): The results cache.\n\n        Returns:\n            list: indexes.\n        \"\"\"\n\n        indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n        return indexes\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"Mosaic transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        # cache and pop images\n        self.results_cache.append(copy.deepcopy(results))\n        if len(self.results_cache) > self.max_cached_images:\n            if self.random_pop:\n                index = random.randint(0, len(self.results_cache) - 1)\n            else:\n                index = 0\n            self.results_cache.pop(index)\n\n        if len(self.results_cache) <= 4:\n            return results\n\n        if random.uniform(0, 1) > self.prob:\n            return results\n        indices = self.get_indexes(self.results_cache)\n        mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n        # TODO: refactor mosaic to reuse these code.\n        mosaic_bboxes = []\n        mosaic_bboxes_labels = []\n        mosaic_ignore_flags = []\n        mosaic_masks = []\n        with_mask = True if 'gt_masks' in results else False\n\n        if len(results['img'].shape) == 3:\n            mosaic_img = np.full(\n                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n                self.pad_val,\n                dtype=results['img'].dtype)\n        else:\n            mosaic_img = np.full(\n                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n                self.pad_val,\n                dtype=results['img'].dtype)\n\n        # mosaic center x, y\n        center_x = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[0])\n        center_y = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[1])\n        center_position = (center_x, center_y)\n\n        loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n        for i, loc in enumerate(loc_strs):\n            if loc == 'top_left':\n                results_patch = copy.deepcopy(results)\n            else:\n                results_patch = copy.deepcopy(mix_results[i - 1])\n\n            img_i = results_patch['img']\n            h_i, w_i = img_i.shape[:2]\n            # keep_ratio resize\n            scale_ratio_i = min(self.img_scale[1] / h_i,\n                                self.img_scale[0] / w_i)\n            img_i = mmcv.imresize(\n                img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n            # compute the combine parameters\n            paste_coord, crop_coord = self._mosaic_combine(\n                loc, 
center_position, img_i.shape[:2][::-1])\n            x1_p, y1_p, x2_p, y2_p = paste_coord\n            x1_c, y1_c, x2_c, y2_c = crop_coord\n\n            # crop and paste image\n            mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n            # adjust coordinate\n            gt_bboxes_i = results_patch['gt_bboxes']\n            gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n            gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n            padw = x1_p - x1_c\n            padh = y1_p - y1_c\n            gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n            gt_bboxes_i.translate_([padw, padh])\n            mosaic_bboxes.append(gt_bboxes_i)\n            mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n            mosaic_ignore_flags.append(gt_ignore_flags_i)\n            if with_mask and results_patch.get('gt_masks', None) is not None:\n                gt_masks_i = results_patch['gt_masks']\n                gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n                gt_masks_i = gt_masks_i.translate(\n                    out_shape=(int(self.img_scale[0] * 2),\n                               int(self.img_scale[1] * 2)),\n                    offset=padw,\n                    direction='horizontal')\n                gt_masks_i = gt_masks_i.translate(\n                    out_shape=(int(self.img_scale[0] * 2),\n                               int(self.img_scale[1] * 2)),\n                    offset=padh,\n                    direction='vertical')\n                mosaic_masks.append(gt_masks_i)\n\n        mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n        mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n        mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n        if self.bbox_clip_border:\n            mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n        # remove outside bboxes\n        inside_inds = mosaic_bboxes.is_inside(\n            [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n        mosaic_bboxes = mosaic_bboxes[inside_inds]\n        mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n        mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n        results['img'] = mosaic_img\n        results['img_shape'] = mosaic_img.shape\n        results['gt_bboxes'] = mosaic_bboxes\n        results['gt_bboxes_labels'] = mosaic_bboxes_labels\n        results['gt_ignore_flags'] = mosaic_ignore_flags\n\n        if with_mask:\n            mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n            results['gt_masks'] = mosaic_masks[inside_inds]\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(img_scale={self.img_scale}, '\n        repr_str += f'center_ratio_range={self.center_ratio_range}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'prob={self.prob}, '\n        repr_str += f'max_cached_images={self.max_cached_images}, '\n        repr_str += f'random_pop={self.random_pop})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass CachedMixUp(BaseTransform):\n    \"\"\"Cached mixup data augmentation.\n\n    .. 
code:: text\n\n                         mixup transform\n                +------------------------------+\n                | mixup image   |              |\n                |      +--------|--------+     |\n                |      |        |        |     |\n                |---------------+        |     |\n                |      |                 |     |\n                |      |      image      |     |\n                |      |                 |     |\n                |      |                 |     |\n                |      |-----------------+     |\n                |             pad              |\n                +------------------------------+\n\n     The cached mixup transform steps are as follows:\n\n        1. Append the results from the last transform into the cache.\n        2. Another random image is picked from the cache and embedded in\n           the top left patch(after padding and resizing)\n        3. The target of mixup transform is the weighted average of mixup\n           image and origin image.\n\n    Required Keys:\n\n    - img\n    - gt_bboxes (np.float32) (optional)\n    - gt_bboxes_labels (np.int64) (optional)\n    - gt_ignore_flags (bool) (optional)\n    - mix_results (List[dict])\n\n\n    Modified Keys:\n\n    - img\n    - img_shape\n    - gt_bboxes (optional)\n    - gt_bboxes_labels (optional)\n    - gt_ignore_flags (optional)\n\n\n    Args:\n        img_scale (Sequence[int]): Image output size after mixup pipeline.\n            The shape order should be (width, height). Defaults to (640, 640).\n        ratio_range (Sequence[float]): Scale ratio of mixup image.\n            Defaults to (0.5, 1.5).\n        flip_ratio (float): Horizontal flip ratio of mixup image.\n            Defaults to 0.5.\n        pad_val (int): Pad value. Defaults to 114.\n        max_iters (int): The maximum number of iterations. If the number of\n            iterations is greater than `max_iters`, but gt_bbox is still\n            empty, then the iteration is terminated. Defaults to 15.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        max_cached_images (int): The maximum length of the cache. The larger\n            the cache, the stronger the randomness of this transform. As a\n            rule of thumb, providing 10 caches for each image suffices for\n            randomness. Defaults to 20.\n        random_pop (bool): Whether to randomly pop a result from the cache\n            when the cache is full. 
If set to False, use FIFO popping method.\n            Defaults to True.\n        prob (float): Probability of applying this transformation.\n            Defaults to 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale: Tuple[int, int] = (640, 640),\n                 ratio_range: Tuple[float, float] = (0.5, 1.5),\n                 flip_ratio: float = 0.5,\n                 pad_val: float = 114.0,\n                 max_iters: int = 15,\n                 bbox_clip_border: bool = True,\n                 max_cached_images: int = 20,\n                 random_pop: bool = True,\n                 prob: float = 1.0) -> None:\n        assert isinstance(img_scale, tuple)\n        assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n                                       f'but got {max_cached_images}.'\n        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n                                 f'got {prob}.'\n        self.dynamic_scale = img_scale\n        self.ratio_range = ratio_range\n        self.flip_ratio = flip_ratio\n        self.pad_val = pad_val\n        self.max_iters = max_iters\n        self.bbox_clip_border = bbox_clip_border\n        self.results_cache = []\n\n        self.max_cached_images = max_cached_images\n        self.random_pop = random_pop\n        self.prob = prob\n\n    @cache_randomness\n    def get_indexes(self, cache: list) -> int:\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            cache (list): The result cache.\n\n        Returns:\n            int: index.\n        \"\"\"\n\n        for i in range(self.max_iters):\n            index = random.randint(0, len(cache) - 1)\n            gt_bboxes_i = cache[index]['gt_bboxes']\n            if len(gt_bboxes_i) != 0:\n                break\n        return index\n\n    @autocast_box_type()\n    def transform(self, results: dict) -> dict:\n        \"\"\"MixUp transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        # cache and pop images\n        self.results_cache.append(copy.deepcopy(results))\n        if len(self.results_cache) > self.max_cached_images:\n            if self.random_pop:\n                index = random.randint(0, len(self.results_cache) - 1)\n            else:\n                index = 0\n            self.results_cache.pop(index)\n\n        if len(self.results_cache) <= 1:\n            return results\n\n        if random.uniform(0, 1) > self.prob:\n            return results\n\n        index = self.get_indexes(self.results_cache)\n        retrieve_results = copy.deepcopy(self.results_cache[index])\n\n        # TODO: refactor mixup to reuse these code.\n        if retrieve_results['gt_bboxes'].shape[0] == 0:\n            # empty bbox\n            return results\n\n        retrieve_img = retrieve_results['img']\n        with_mask = True if 'gt_masks' in results else False\n\n        jit_factor = random.uniform(*self.ratio_range)\n        is_filp = random.uniform(0, 1) > self.flip_ratio\n\n        if len(retrieve_img.shape) == 3:\n            out_img = np.ones(\n                (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n                dtype=retrieve_img.dtype) * self.pad_val\n        else:\n            out_img = np.ones(\n                self.dynamic_scale[::-1],\n                dtype=retrieve_img.dtype) * self.pad_val\n\n        # 1. 
keep_ratio resize\n        scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n                          self.dynamic_scale[0] / retrieve_img.shape[1])\n        retrieve_img = mmcv.imresize(\n            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n                           int(retrieve_img.shape[0] * scale_ratio)))\n\n        # 2. paste\n        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n        # 3. scale jit\n        scale_ratio *= jit_factor\n        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n                                          int(out_img.shape[0] * jit_factor)))\n\n        # 4. flip\n        if is_filp:\n            out_img = out_img[:, ::-1, :]\n\n        # 5. random crop\n        ori_img = results['img']\n        origin_h, origin_w = out_img.shape[:2]\n        target_h, target_w = ori_img.shape[:2]\n        padded_img = np.ones((max(origin_h, target_h), max(\n            origin_w, target_w), 3)) * self.pad_val\n        padded_img = padded_img.astype(np.uint8)\n        padded_img[:origin_h, :origin_w] = out_img\n\n        x_offset, y_offset = 0, 0\n        if padded_img.shape[0] > target_h:\n            y_offset = random.randint(0, padded_img.shape[0] - target_h)\n        if padded_img.shape[1] > target_w:\n            x_offset = random.randint(0, padded_img.shape[1] - target_w)\n        padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n                                        x_offset:x_offset + target_w]\n\n        # 6. adjust bbox\n        retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n        retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n        if with_mask:\n            retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n                scale_ratio)\n\n        if self.bbox_clip_border:\n            retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n        if is_filp:\n            retrieve_gt_bboxes.flip_([origin_h, origin_w],\n                                     direction='horizontal')\n            if with_mask:\n                retrieve_gt_masks = retrieve_gt_masks.flip()\n\n        # 7. filter\n        cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n        cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n        if with_mask:\n            retrieve_gt_masks = retrieve_gt_masks.translate(\n                out_shape=(target_h, target_w),\n                offset=-x_offset,\n                direction='horizontal')\n            retrieve_gt_masks = retrieve_gt_masks.translate(\n                out_shape=(target_h, target_w),\n                offset=-y_offset,\n                direction='vertical')\n\n        if self.bbox_clip_border:\n            cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n        # 8. 
mix up\n        ori_img = ori_img.astype(np.float32)\n        mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n        retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n        retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n        mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n            (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n        mixup_gt_bboxes_labels = np.concatenate(\n            (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n        mixup_gt_ignore_flags = np.concatenate(\n            (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n        if with_mask:\n            mixup_gt_masks = retrieve_gt_masks.cat(\n                [results['gt_masks'], retrieve_gt_masks])\n\n        # remove outside bbox\n        inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n        mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n        mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n        mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n        if with_mask:\n            mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n        results['img'] = mixup_img.astype(np.uint8)\n        results['img_shape'] = mixup_img.shape\n        results['gt_bboxes'] = mixup_gt_bboxes\n        results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n        results['gt_ignore_flags'] = mixup_gt_ignore_flags\n        if with_mask:\n            results['gt_masks'] = mixup_gt_masks\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'flip_ratio={self.flip_ratio}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'max_iters={self.max_iters}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n        repr_str += f'max_cached_images={self.max_cached_images}, '\n        repr_str += f'random_pop={self.random_pop}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n"
  },
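# Illustrative sketch (not repository code): how the cached transforms defined
# above might be wired into a training pipeline. The surrounding Load/Pack
# steps and the exact parameter values are assumptions for demonstration; the
# documented defaults are img_scale=(640, 640) and max_cached_images=40 for
# CachedMosaic / 20 for CachedMixUp.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='CachedMosaic',
        img_scale=(640, 640),
        pad_val=114,
        max_cached_images=40,
        random_pop=True),
    dict(
        type='CachedMixUp',
        img_scale=(640, 640),
        ratio_range=(0.5, 1.5),
        max_cached_images=20,
        random_pop=True,
        prob=1.0),
    dict(type='PackDetInputs')
]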
  {
    "path": "mmdet/datasets/transforms/wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nfrom mmcv.transforms import BaseTransform, Compose\nfrom mmcv.transforms.utils import cache_random_params, cache_randomness\n\nfrom mmdet.registry import TRANSFORMS\n\n\n@TRANSFORMS.register_module()\nclass MultiBranch(BaseTransform):\n    r\"\"\"Multiple branch pipeline wrapper.\n\n    Generate multiple data-augmented versions of the same image.\n    `MultiBranch` needs to specify the branch names of all\n    pipelines of the dataset, perform corresponding data augmentation\n    for the current branch, and return None for other branches,\n    which ensures the consistency of return format across\n    different samples.\n\n    Args:\n        branch_field (list): List of branch names.\n        branch_pipelines (dict): Dict of different pipeline configs\n            to be composed.\n\n    Examples:\n        >>> branch_field = ['sup', 'unsup_teacher', 'unsup_student']\n        >>> sup_pipeline = [\n        >>>     dict(type='LoadImageFromFile',\n        >>>         file_client_args=dict(backend='disk')),\n        >>>     dict(type='LoadAnnotations', with_bbox=True),\n        >>>     dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n        >>>     dict(type='RandomFlip', prob=0.5),\n        >>>     dict(\n        >>>         type='MultiBranch',\n        >>>         branch_field=branch_field,\n        >>>         sup=dict(type='PackDetInputs'))\n        >>>     ]\n        >>> weak_pipeline = [\n        >>>     dict(type='LoadImageFromFile',\n        >>>         file_client_args=dict(backend='disk')),\n        >>>     dict(type='LoadAnnotations', with_bbox=True),\n        >>>     dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n        >>>     dict(type='RandomFlip', prob=0.0),\n        >>>     dict(\n        >>>         type='MultiBranch',\n        >>>         branch_field=branch_field,\n        >>>         sup=dict(type='PackDetInputs'))\n        >>>     ]\n        >>> strong_pipeline = [\n        >>>     dict(type='LoadImageFromFile',\n        >>>         file_client_args=dict(backend='disk')),\n        >>>     dict(type='LoadAnnotations', with_bbox=True),\n        >>>     dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n        >>>     dict(type='RandomFlip', prob=1.0),\n        >>>     dict(\n        >>>         type='MultiBranch',\n        >>>         branch_field=branch_field,\n        >>>         sup=dict(type='PackDetInputs'))\n        >>>     ]\n        >>> unsup_pipeline = [\n        >>>     dict(type='LoadImageFromFile',\n        >>>         file_client_args=file_client_args),\n        >>>     dict(type='LoadEmptyAnnotations'),\n        >>>     dict(\n        >>>         type='MultiBranch',\n        >>>         branch_field=branch_field,\n        >>>         unsup_teacher=weak_pipeline,\n        >>>         unsup_student=strong_pipeline)\n        >>>     ]\n        >>> from mmcv.transforms import Compose\n        >>> sup_branch = Compose(sup_pipeline)\n        >>> unsup_branch = Compose(unsup_pipeline)\n        >>> print(sup_branch)\n        >>> Compose(\n        >>>     LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2', file_client_args={'backend': 'disk'}) # noqa\n        >>>     LoadAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, poly2mask=True, imdecode_backend='cv2', file_client_args={'backend': 'disk'}) # noqa\n        
>>>     Resize(scale=(1333, 800), scale_factor=None, keep_ratio=True, clip_object_border=True), backend=cv2), interpolation=bilinear) # noqa\n        >>>     RandomFlip(prob=0.5, direction=horizontal)\n        >>>     MultiBranch(branch_pipelines=['sup'])\n        >>> )\n        >>> print(unsup_branch)\n        >>> Compose(\n        >>>     LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2', file_client_args={'backend': 'disk'}) # noqa\n        >>>     LoadEmptyAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, seg_ignore_label=255) # noqa\n        >>>     MultiBranch(branch_pipelines=['unsup_teacher', 'unsup_student'])\n        >>> )\n    \"\"\"\n\n    def __init__(self, branch_field: List[str],\n                 **branch_pipelines: dict) -> None:\n        self.branch_field = branch_field\n        self.branch_pipelines = {\n            branch: Compose(pipeline)\n            for branch, pipeline in branch_pipelines.items()\n        }\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Transform function to apply transforms sequentially.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict:\n\n            - 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of\n                models from different branches.\n            - 'data_sample' (Dict[str,obj:`DetDataSample`]): The annotation\n                info of the sample from different branches.\n        \"\"\"\n\n        multi_results = {}\n        for branch in self.branch_field:\n            multi_results[branch] = {'inputs': None, 'data_samples': None}\n        for branch, pipeline in self.branch_pipelines.items():\n            branch_results = pipeline(copy.deepcopy(results))\n            # If one branch pipeline returns None,\n            # it will sample another data from dataset.\n            if branch_results is None:\n                return None\n            multi_results[branch] = branch_results\n\n        format_results = {}\n        for branch, results in multi_results.items():\n            for key in results.keys():\n                if format_results.get(key, None) is None:\n                    format_results[key] = {branch: results[key]}\n                else:\n                    format_results[key][branch] = results[key]\n        return format_results\n\n    def __repr__(self) -> str:\n        repr_str = self.__class__.__name__\n        repr_str += f'(branch_pipelines={list(self.branch_pipelines.keys())})'\n        return repr_str\n\n\n@TRANSFORMS.register_module()\nclass RandomOrder(Compose):\n    \"\"\"Shuffle the transform Sequence.\"\"\"\n\n    @cache_randomness\n    def _random_permutation(self):\n        return np.random.permutation(len(self.transforms))\n\n    def transform(self, results: Dict) -> Optional[Dict]:\n        \"\"\"Transform function to apply transforms in random order.\n\n        Args:\n            results (dict): A result dict contains the results to transform.\n\n        Returns:\n            dict or None: Transformed results.\n        \"\"\"\n        inds = self._random_permutation()\n        for idx in inds:\n            t = self.transforms[idx]\n            results = t(results)\n            if results is None:\n                return None\n        return results\n\n    def __repr__(self):\n        \"\"\"Compute the string representation.\"\"\"\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n        
    format_string += f'{t.__class__.__name__}, '\n        format_string += ')'\n        return format_string\n\n\n@TRANSFORMS.register_module()\nclass ProposalBroadcaster(BaseTransform):\n    \"\"\"A transform wrapper to apply the wrapped transforms to process both\n    `gt_bboxes` and `proposals` without adding any codes. It will do the\n    following steps:\n\n        1. Scatter the broadcasting targets to a list of inputs of the wrapped\n           transforms. The type of the list should be list[dict, dict], which\n           the first is the original inputs, the second is the processing\n           results that `gt_bboxes` being rewritten by the `proposals`.\n        2. Apply ``self.transforms``, with same random parameters, which is\n           sharing with a context manager. The type of the outputs is a\n           list[dict, dict].\n        3. Gather the outputs, update the `proposals` in the first item of\n           the outputs with the `gt_bboxes` in the second .\n\n    Args:\n         transforms (list, optional): Sequence of transform\n            object or config dict to be wrapped. Defaults to [].\n\n    Note: The `TransformBroadcaster` in MMCV can achieve the same operation as\n          `ProposalBroadcaster`, but need to set more complex parameters.\n\n    Examples:\n        >>> pipeline = [\n        >>>     dict(type='LoadImageFromFile'),\n        >>>     dict(type='LoadProposals', num_max_proposals=2000),\n        >>>     dict(type='LoadAnnotations', with_bbox=True),\n        >>>     dict(\n        >>>         type='ProposalBroadcaster',\n        >>>         transforms=[\n        >>>             dict(type='Resize', scale=(1333, 800),\n        >>>                  keep_ratio=True),\n        >>>             dict(type='RandomFlip', prob=0.5),\n        >>>         ]),\n        >>>     dict(type='PackDetInputs')]\n    \"\"\"\n\n    def __init__(self, transforms: List[Union[dict, Callable]] = []) -> None:\n        self.transforms = Compose(transforms)\n\n    def transform(self, results: dict) -> dict:\n        \"\"\"Apply wrapped transform functions to process both `gt_bboxes` and\n        `proposals`.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        assert results.get('proposals', None) is not None, \\\n            '`proposals` should be in the results, please delete ' \\\n            '`ProposalBroadcaster` in your configs, or check whether ' \\\n            'you have load proposals successfully.'\n\n        inputs = self._process_input(results)\n        outputs = self._apply_transforms(inputs)\n        outputs = self._process_output(outputs)\n        return outputs\n\n    def _process_input(self, data: dict) -> list:\n        \"\"\"Scatter the broadcasting targets to a list of inputs of the wrapped\n        transforms.\n\n        Args:\n            data (dict): The original input data.\n\n        Returns:\n            list[dict]: A list of input data.\n        \"\"\"\n        cp_data = copy.deepcopy(data)\n        cp_data['gt_bboxes'] = cp_data['proposals']\n        scatters = [data, cp_data]\n        return scatters\n\n    def _apply_transforms(self, inputs: list) -> list:\n        \"\"\"Apply ``self.transforms``.\n\n        Args:\n            inputs (list[dict, dict]): list of input data.\n\n        Returns:\n            list[dict]: The output of the wrapped pipeline.\n        \"\"\"\n        assert len(inputs) == 2\n        ctx = cache_random_params\n        
with ctx(self.transforms):\n            output_scatters = [self.transforms(_input) for _input in inputs]\n        return output_scatters\n\n    def _process_output(self, output_scatters: list) -> dict:\n        \"\"\"Gathering and renaming data items.\n\n        Args:\n            output_scatters (list[dict, dict]): The output of the wrapped\n                pipeline.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        assert isinstance(output_scatters, list) and \\\n               isinstance(output_scatters[0], dict) and \\\n               len(output_scatters) == 2\n        outputs = output_scatters[0]\n        outputs['proposals'] = output_scatters[1]['gt_bboxes']\n        return outputs\n"
  },
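# Illustrative sketch (assumption, not repository code): RandomOrder composes
# the given transforms and applies them in a freshly shuffled order on every
# call; the wrapped transform types below are only examples.
pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomOrder',
        transforms=[
            dict(type='RandomFlip', prob=0.5),
            dict(type='Resize', scale=(1333, 800), keep_ratio=True),
        ]),
    dict(type='PackDetInputs')
]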
  {
    "path": "mmdet/datasets/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nfrom mmcv.transforms import LoadImageFromFile\n\nfrom mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations\nfrom mmdet.registry import TRANSFORMS\n\n\ndef get_loading_pipeline(pipeline):\n    \"\"\"Only keep loading image and annotations related configuration.\n\n    Args:\n        pipeline (list[dict]): Data pipeline configs.\n\n    Returns:\n        list[dict]: The new pipeline list with only keep\n            loading image and annotations related configuration.\n\n    Examples:\n        >>> pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(type='LoadAnnotations', with_bbox=True),\n        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n        ...    dict(type='RandomFlip', flip_ratio=0.5),\n        ...    dict(type='Normalize', **img_norm_cfg),\n        ...    dict(type='Pad', size_divisor=32),\n        ...    dict(type='DefaultFormatBundle'),\n        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n        ...    ]\n        >>> expected_pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(type='LoadAnnotations', with_bbox=True)\n        ...    ]\n        >>> assert expected_pipelines ==\\\n        ...        get_loading_pipeline(pipelines)\n    \"\"\"\n    loading_pipeline_cfg = []\n    for cfg in pipeline:\n        obj_cls = TRANSFORMS.get(cfg['type'])\n        # TODO：use more elegant way to distinguish loading modules\n        if obj_cls is not None and obj_cls in (LoadImageFromFile,\n                                               LoadAnnotations,\n                                               LoadPanopticAnnotations):\n            loading_pipeline_cfg.append(cfg)\n    assert len(loading_pipeline_cfg) == 2, \\\n        'The data pipeline in your config file must include ' \\\n        'loading image and annotations related pipeline.'\n    return loading_pipeline_cfg\n"
  },
  {
    "path": "mmdet/datasets/voc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import DATASETS\nfrom .xml_style import XMLDataset\n\n\n@DATASETS.register_module()\nclass VOCDataset(XMLDataset):\n    \"\"\"Dataset for PASCAL VOC.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',\n         'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n         'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),\n        # palette is a list of color tuples, which is used for visualization.\n        'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),\n                    (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),\n                    (153, 69, 1), (120, 166, 157), (0, 182, 199),\n                    (0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60),\n                    (163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100),\n                    (183, 130, 88)]\n    }\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        if 'VOC2007' in self.sub_data_root:\n            self._metainfo['dataset_type'] = 'VOC2007'\n        elif 'VOC2012' in self.sub_data_root:\n            self._metainfo['dataset_type'] = 'VOC2012'\n        else:\n            self._metainfo['dataset_type'] = None\n"
  },
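# Illustrative config sketch (assumption): instantiating VOCDataset so that
# `sub_data_root` contains 'VOC2007' and `dataset_type` is set accordingly in
# `__init__` above. Paths follow the usual VOCdevkit layout but are
# placeholders.
train_dataset = dict(
    type='VOCDataset',
    data_root='data/VOCdevkit/',
    ann_file='VOC2007/ImageSets/Main/trainval.txt',
    data_prefix=dict(sub_data_root='VOC2007/'),
    filter_cfg=dict(filter_empty_gt=True, min_size=32),
    pipeline=[])  # fill in a data pipeline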
  {
    "path": "mmdet/datasets/wider_face.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nfrom mmengine.fileio import list_from_file\n\nfrom mmdet.registry import DATASETS\nfrom .xml_style import XMLDataset\n\n\n@DATASETS.register_module()\nclass WIDERFaceDataset(XMLDataset):\n    \"\"\"Reader for the WIDER Face dataset in PASCAL VOC format.\n\n    Conversion scripts can be found in\n    https://github.com/sovrasov/wider-face-pascal-voc-annotations\n    \"\"\"\n    METAINFO = {'classes': ('face', ), 'palette': [(0, 255, 0)]}\n\n    def __init__(self, **kwargs):\n        super(WIDERFaceDataset, self).__init__(**kwargs)\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from WIDERFace XML style annotation file.\n\n        Args:\n            ann_file (str): Path of XML file.\n\n        Returns:\n            list[dict]: Annotation info from XML file.\n        \"\"\"\n\n        data_infos = []\n        img_ids = list_from_file(ann_file)\n        for img_id in img_ids:\n            filename = f'{img_id}.jpg'\n            xml_path = osp.join(self.img_prefix, 'Annotations',\n                                f'{img_id}.xml')\n            tree = ET.parse(xml_path)\n            root = tree.getroot()\n            size = root.find('size')\n            width = int(size.find('width').text)\n            height = int(size.find('height').text)\n            folder = root.find('folder').text\n            data_infos.append(\n                dict(\n                    id=img_id,\n                    filename=osp.join(folder, filename),\n                    width=width,\n                    height=height))\n\n        return data_infos\n"
  },
  {
    "path": "mmdet/datasets/xml_style.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\nfrom typing import List, Optional, Union\n\nimport mmcv\nfrom mmengine.fileio import list_from_file\n\nfrom mmdet.registry import DATASETS\nfrom .base_det_dataset import BaseDetDataset\n\n\n@DATASETS.register_module()\nclass XMLDataset(BaseDetDataset):\n    \"\"\"XML dataset for detection.\n\n    Args:\n        img_subdir (str): Subdir where images are stored. Default: JPEGImages.\n        ann_subdir (str): Subdir where annotations are. Default: Annotations.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 img_subdir: str = 'JPEGImages',\n                 ann_subdir: str = 'Annotations',\n                 **kwargs) -> None:\n        self.img_subdir = img_subdir\n        self.ann_subdir = ann_subdir\n        super().__init__(**kwargs)\n\n    @property\n    def sub_data_root(self) -> str:\n        \"\"\"Return the sub data root.\"\"\"\n        return self.data_prefix.get('sub_data_root', '')\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotation from XML style ann_file.\n\n        Returns:\n            list[dict]: Annotation info from XML file.\n        \"\"\"\n        assert self._metainfo.get('classes', None) is not None, \\\n            '`classes` in `XMLDataset` can not be None.'\n        self.cat2label = {\n            cat: i\n            for i, cat in enumerate(self._metainfo['classes'])\n        }\n\n        data_list = []\n        img_ids = list_from_file(\n            self.ann_file, file_client_args=self.file_client_args)\n        for img_id in img_ids:\n            file_name = osp.join(self.img_subdir, f'{img_id}.jpg')\n            xml_path = osp.join(self.sub_data_root, self.ann_subdir,\n                                f'{img_id}.xml')\n\n            raw_img_info = {}\n            raw_img_info['img_id'] = img_id\n            raw_img_info['file_name'] = file_name\n            raw_img_info['xml_path'] = xml_path\n\n            parsed_data_info = self.parse_data_info(raw_img_info)\n            data_list.append(parsed_data_info)\n        return data_list\n\n    @property\n    def bbox_min_size(self) -> Optional[str]:\n        \"\"\"Return the minimum size of bounding boxes in the images.\"\"\"\n        if self.filter_cfg is not None:\n            return self.filter_cfg.get('bbox_min_size', None)\n        else:\n            return None\n\n    def parse_data_info(self, img_info: dict) -> Union[dict, List[dict]]:\n        \"\"\"Parse raw annotation to target format.\n\n        Args:\n            img_info (dict): Raw image information, usually it includes\n                `img_id`, `file_name`, and `xml_path`.\n\n        Returns:\n            Union[dict, List[dict]]: Parsed annotation.\n        \"\"\"\n        data_info = {}\n        img_path = osp.join(self.sub_data_root, img_info['file_name'])\n        data_info['img_path'] = img_path\n        data_info['img_id'] = img_info['img_id']\n        data_info['xml_path'] = img_info['xml_path']\n\n        # deal with xml file\n        with self.file_client.get_local_path(\n                img_info['xml_path']) as local_path:\n            raw_ann_info = ET.parse(local_path)\n        root = raw_ann_info.getroot()\n        size = root.find('size')\n        if size is not None:\n            width = 
int(size.find('width').text)\n            height = int(size.find('height').text)\n        else:\n            img_bytes = self.file_client.get(img_path)\n            img = mmcv.imfrombytes(img_bytes, backend='cv2')\n            height, width = img.shape[:2]\n            del img, img_bytes\n\n        data_info['height'] = height\n        data_info['width'] = width\n\n        instances = []\n        for obj in raw_ann_info.findall('object'):\n            instance = {}\n            name = obj.find('name').text\n            if name not in self._metainfo['classes']:\n                continue\n            difficult = obj.find('difficult')\n            difficult = 0 if difficult is None else int(difficult.text)\n            bnd_box = obj.find('bndbox')\n            bbox = [\n                int(float(bnd_box.find('xmin').text)) - 1,\n                int(float(bnd_box.find('ymin').text)) - 1,\n                int(float(bnd_box.find('xmax').text)) - 1,\n                int(float(bnd_box.find('ymax').text)) - 1\n            ]\n            ignore = False\n            if self.bbox_min_size is not None:\n                assert not self.test_mode\n                w = bbox[2] - bbox[0]\n                h = bbox[3] - bbox[1]\n                if w < self.bbox_min_size or h < self.bbox_min_size:\n                    ignore = True\n            if difficult or ignore:\n                instance['ignore_flag'] = 1\n            else:\n                instance['ignore_flag'] = 0\n            instance['bbox'] = bbox\n            instance['bbox_label'] = self.cat2label[name]\n            instances.append(instance)\n        data_info['instances'] = instances\n        return data_info\n\n    def filter_data(self) -> List[dict]:\n        \"\"\"Filter annotations according to filter_cfg.\n\n        Returns:\n            List[dict]: Filtered results.\n        \"\"\"\n        if self.test_mode:\n            return self.data_list\n\n        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False) \\\n            if self.filter_cfg is not None else False\n        min_size = self.filter_cfg.get('min_size', 0) \\\n            if self.filter_cfg is not None else 0\n\n        valid_data_infos = []\n        for i, data_info in enumerate(self.data_list):\n            width = data_info['width']\n            height = data_info['height']\n            if filter_empty_gt and len(data_info['instances']) == 0:\n                continue\n            if min(width, height) >= min_size:\n                valid_data_infos.append(data_info)\n\n        return valid_data_infos\n"
  },
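# Illustrative sketch (assumption): the keys consumed by `XMLDataset` above.
# `filter_empty_gt` and `min_size` are read in `filter_data`, while
# `bbox_min_size` is read by the `bbox_min_size` property and marks tiny boxes
# as ignored during parsing (training mode only).
filter_cfg = dict(
    filter_empty_gt=True,  # skip images with no remaining instances
    min_size=32,           # skip images whose shorter side is below 32 px
    bbox_min_size=8,       # mark boxes narrower or shorter than 8 px as ignore
)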
  {
    "path": "mmdet/engine/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .hooks import *  # noqa: F401, F403\nfrom .optimizers import *  # noqa: F401, F403\nfrom .runner import *  # noqa: F401, F403\nfrom .schedulers import *  # noqa: F401, F403\n"
  },
  {
    "path": "mmdet/engine/hooks/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .checkloss_hook import CheckInvalidLossHook\nfrom .mean_teacher_hook import MeanTeacherHook\nfrom .memory_profiler_hook import MemoryProfilerHook\nfrom .num_class_check_hook import NumClassCheckHook\nfrom .pipeline_switch_hook import PipelineSwitchHook\nfrom .set_epoch_info_hook import SetEpochInfoHook\nfrom .sync_norm_hook import SyncNormHook\nfrom .utils import trigger_visualization_hook\nfrom .visualization_hook import DetVisualizationHook\nfrom .yolox_mode_switch_hook import YOLOXModeSwitchHook\n\n__all__ = [\n    'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',\n    'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',\n    'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',\n    'PipelineSwitchHook'\n]\n"
  },
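# Illustrative sketch (assumption): the hooks exported above are registered in
# HOOKS and are typically enabled from a config via `custom_hooks`; the
# particular selection and intervals here are placeholders.
custom_hooks = [
    dict(type='CheckInvalidLossHook', interval=50),
    dict(type='MemoryProfilerHook', interval=50),
    dict(type='MeanTeacherHook', momentum=0.001, interval=1),
]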
  {
    "path": "mmdet/engine/hooks/checkloss_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.hooks import Hook\nfrom mmengine.runner import Runner\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass CheckInvalidLossHook(Hook):\n    \"\"\"Check invalid loss hook.\n\n    This hook will regularly check whether the loss is valid\n    during training.\n\n    Args:\n        interval (int): Checking interval (every k iterations).\n            Default: 50.\n    \"\"\"\n\n    def __init__(self, interval: int = 50) -> None:\n        self.interval = interval\n\n    def after_train_iter(self,\n                         runner: Runner,\n                         batch_idx: int,\n                         data_batch: Optional[dict] = None,\n                         outputs: Optional[dict] = None) -> None:\n        \"\"\"Regularly check whether the loss is valid every n iterations.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training process.\n            batch_idx (int): The index of the current batch in the train loop.\n            data_batch (dict, Optional): Data from dataloader.\n                Defaults to None.\n            outputs (dict, Optional): Outputs from model. Defaults to None.\n        \"\"\"\n        if self.every_n_train_iters(runner, self.interval):\n            assert torch.isfinite(outputs['loss']), \\\n                runner.logger.info('loss become infinite or NaN!')\n"
  },
  {
    "path": "mmdet/engine/hooks/mean_teacher_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch.nn as nn\nfrom mmengine.hooks import Hook\nfrom mmengine.model import is_model_wrapper\nfrom mmengine.runner import Runner\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass MeanTeacherHook(Hook):\n    \"\"\"Mean Teacher Hook.\n\n    Mean Teacher is an efficient semi-supervised learning method in\n    `Mean Teacher <https://arxiv.org/abs/1703.01780>`_.\n    This method requires two models with exactly the same structure,\n    as the student model and the teacher model, respectively.\n    The student model updates the parameters through gradient descent,\n    and the teacher model updates the parameters through\n    exponential moving average of the student model.\n    Compared with the student model, the teacher model\n    is smoother and accumulates more knowledge.\n\n    Args:\n        momentum (float): The momentum used for updating teacher's parameter.\n            Teacher's parameter are updated with the formula:\n           `teacher = (1-momentum) * teacher + momentum * student`.\n            Defaults to 0.001.\n        interval (int): Update teacher's parameter every interval iteration.\n            Defaults to 1.\n        skip_buffers (bool): Whether to skip the model buffers, such as\n            batchnorm running stats (running_mean, running_var), it does not\n            perform the ema operation. Default to True.\n    \"\"\"\n\n    def __init__(self,\n                 momentum: float = 0.001,\n                 interval: int = 1,\n                 skip_buffer=True) -> None:\n        assert 0 < momentum < 1\n        self.momentum = momentum\n        self.interval = interval\n        self.skip_buffers = skip_buffer\n\n    def before_train(self, runner: Runner) -> None:\n        \"\"\"To check that teacher model and student model exist.\"\"\"\n        model = runner.model\n        if is_model_wrapper(model):\n            model = model.module\n        assert hasattr(model, 'teacher')\n        assert hasattr(model, 'student')\n        # only do it at initial stage\n        if runner.iter == 0:\n            self.momentum_update(model, 1)\n\n    def after_train_iter(self,\n                         runner: Runner,\n                         batch_idx: int,\n                         data_batch: Optional[dict] = None,\n                         outputs: Optional[dict] = None) -> None:\n        \"\"\"Update teacher's parameter every self.interval iterations.\"\"\"\n        if (runner.iter + 1) % self.interval != 0:\n            return\n        model = runner.model\n        if is_model_wrapper(model):\n            model = model.module\n        self.momentum_update(model, self.momentum)\n\n    def momentum_update(self, model: nn.Module, momentum: float) -> None:\n        \"\"\"Compute the moving average of the parameters using exponential\n        moving average.\"\"\"\n        if self.skip_buffers:\n            for (src_name, src_parm), (dst_name, dst_parm) in zip(\n                    model.student.named_parameters(),\n                    model.teacher.named_parameters()):\n                dst_parm.data.mul_(1 - momentum).add_(\n                    src_parm.data, alpha=momentum)\n        else:\n            for (src_parm,\n                 dst_parm) in zip(model.student.state_dict().values(),\n                                  model.teacher.state_dict().values()):\n                # exclude num_tracking\n                if dst_parm.dtype.is_floating_point:\n       
             dst_parm.data.mul_(1 - momentum).add_(\n                        src_parm.data, alpha=momentum)\n"
  },
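# Minimal numeric sketch (assumption, standalone): the exponential moving
# average performed by `momentum_update` above,
# teacher = (1 - momentum) * teacher + momentum * student,
# applied to plain tensors instead of a detector's parameters.
import torch

momentum = 0.001
teacher = torch.tensor([1.0, 2.0])
student = torch.tensor([3.0, 4.0])

# same in-place update as
# dst_parm.data.mul_(1 - momentum).add_(src_parm.data, alpha=momentum)
teacher.mul_(1 - momentum).add_(student, alpha=momentum)
print(teacher)  # tensor([1.0020, 2.0020])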
  {
    "path": "mmdet/engine/hooks/memory_profiler_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Sequence\n\nfrom mmengine.hooks import Hook\nfrom mmengine.runner import Runner\n\nfrom mmdet.registry import HOOKS\nfrom mmdet.structures import DetDataSample\n\n\n@HOOKS.register_module()\nclass MemoryProfilerHook(Hook):\n    \"\"\"Memory profiler hook recording memory information including virtual\n    memory, swap memory, and the memory of the current process.\n\n    Args:\n        interval (int): Checking interval (every k iterations).\n            Default: 50.\n    \"\"\"\n\n    def __init__(self, interval: int = 50) -> None:\n        try:\n            from psutil import swap_memory, virtual_memory\n            self._swap_memory = swap_memory\n            self._virtual_memory = virtual_memory\n        except ImportError:\n            raise ImportError('psutil is not installed, please install it by: '\n                              'pip install psutil')\n\n        try:\n            from memory_profiler import memory_usage\n            self._memory_usage = memory_usage\n        except ImportError:\n            raise ImportError(\n                'memory_profiler is not installed, please install it by: '\n                'pip install memory_profiler')\n\n        self.interval = interval\n\n    def _record_memory_information(self, runner: Runner) -> None:\n        \"\"\"Regularly record memory information.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training or evaluation\n                process.\n        \"\"\"\n        # in Byte\n        virtual_memory = self._virtual_memory()\n        swap_memory = self._swap_memory()\n        # in MB\n        process_memory = self._memory_usage()[0]\n        factor = 1024 * 1024\n        runner.logger.info(\n            'Memory information '\n            'available_memory: '\n            f'{round(virtual_memory.available / factor)} MB, '\n            'used_memory: '\n            f'{round(virtual_memory.used / factor)} MB, '\n            f'memory_utilization: {virtual_memory.percent} %, '\n            'available_swap_memory: '\n            f'{round((swap_memory.total - swap_memory.used) / factor)}'\n            ' MB, '\n            f'used_swap_memory: {round(swap_memory.used / factor)} MB, '\n            f'swap_memory_utilization: {swap_memory.percent} %, '\n            'current_process_memory: '\n            f'{round(process_memory)} MB')\n\n    def after_train_iter(self,\n                         runner: Runner,\n                         batch_idx: int,\n                         data_batch: Optional[dict] = None,\n                         outputs: Optional[dict] = None) -> None:\n        \"\"\"Regularly record memory information.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training process.\n            batch_idx (int): The index of the current batch in the train loop.\n            data_batch (dict, optional): Data from dataloader.\n                Defaults to None.\n            outputs (dict, optional): Outputs from model. 
Defaults to None.\n        \"\"\"\n        if self.every_n_inner_iters(batch_idx, self.interval):\n            self._record_memory_information(runner)\n\n    def after_val_iter(\n            self,\n            runner: Runner,\n            batch_idx: int,\n            data_batch: Optional[dict] = None,\n            outputs: Optional[Sequence[DetDataSample]] = None) -> None:\n        \"\"\"Regularly record memory information.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the validation process.\n            batch_idx (int): The index of the current batch in the val loop.\n            data_batch (dict, optional): Data from dataloader.\n                Defaults to None.\n            outputs (Sequence[:obj:`DetDataSample`], optional):\n                Outputs from model. Defaults to None.\n        \"\"\"\n        if self.every_n_inner_iters(batch_idx, self.interval):\n            self._record_memory_information(runner)\n\n    def after_test_iter(\n            self,\n            runner: Runner,\n            batch_idx: int,\n            data_batch: Optional[dict] = None,\n            outputs: Optional[Sequence[DetDataSample]] = None) -> None:\n        \"\"\"Regularly record memory information.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the testing process.\n            batch_idx (int): The index of the current batch in the test loop.\n            data_batch (dict, optional): Data from dataloader.\n                Defaults to None.\n            outputs (Sequence[:obj:`DetDataSample`], optional):\n                Outputs from model. Defaults to None.\n        \"\"\"\n        if self.every_n_inner_iters(batch_idx, self.interval):\n            self._record_memory_information(runner)\n"
  },
  {
    "path": "mmdet/engine/hooks/num_class_check_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import VGG\nfrom mmengine.hooks import Hook\nfrom mmengine.runner import Runner\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass NumClassCheckHook(Hook):\n    \"\"\"Check whether the `num_classes` in head matches the length of `classes`\n    in `dataset.metainfo`.\"\"\"\n\n    def _check_head(self, runner: Runner, mode: str) -> None:\n        \"\"\"Check whether the `num_classes` in head matches the length of\n        `classes` in `dataset.metainfo`.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training or evaluation\n                process.\n        \"\"\"\n        assert mode in ['train', 'val']\n        model = runner.model\n        dataset = runner.train_dataloader.dataset if mode == 'train' else \\\n            runner.val_dataloader.dataset\n        if dataset.metainfo.get('classes', None) is None:\n            runner.logger.warning(\n                f'Please set `classes` '\n                f'in the {dataset.__class__.__name__} `metainfo` and'\n                f'check if it is consistent with the `num_classes` '\n                f'of head')\n        else:\n            classes = dataset.metainfo['classes']\n            assert type(classes) is not str, \\\n                (f'`classes` in {dataset.__class__.__name__}'\n                 f'should be a tuple of str.'\n                 f'Add comma if number of classes is 1 as '\n                 f'classes = ({classes},)')\n            from mmdet.models.roi_heads.mask_heads import FusedSemanticHead\n            for name, module in model.named_modules():\n                if hasattr(module, 'num_classes') and not name.endswith(\n                        'rpn_head') and not isinstance(\n                            module, (VGG, FusedSemanticHead)):\n                    assert module.num_classes == len(classes), \\\n                        (f'The `num_classes` ({module.num_classes}) in '\n                         f'{module.__class__.__name__} of '\n                         f'{model.__class__.__name__} does not matches '\n                         f'the length of `classes` '\n                         f'{len(classes)}) in '\n                         f'{dataset.__class__.__name__}')\n\n    def before_train_epoch(self, runner: Runner) -> None:\n        \"\"\"Check whether the training dataset is compatible with head.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training or evaluation\n                process.\n        \"\"\"\n        self._check_head(runner, 'train')\n\n    def before_val_epoch(self, runner: Runner) -> None:\n        \"\"\"Check whether the dataset in val epoch is compatible with head.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the training or evaluation\n                process.\n        \"\"\"\n        self._check_head(runner, 'val')\n"
  },
  {
    "path": "mmdet/engine/hooks/pipeline_switch_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.transforms import Compose\nfrom mmengine.hooks import Hook\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass PipelineSwitchHook(Hook):\n    \"\"\"Switch data pipeline at switch_epoch.\n\n    Args:\n        switch_epoch (int): switch pipeline at this epoch.\n        switch_pipeline (list[dict]): the pipeline to switch to.\n    \"\"\"\n\n    def __init__(self, switch_epoch, switch_pipeline):\n        self.switch_epoch = switch_epoch\n        self.switch_pipeline = switch_pipeline\n        self._restart_dataloader = False\n\n    def before_train_epoch(self, runner):\n        \"\"\"switch pipeline.\"\"\"\n        epoch = runner.epoch\n        train_loader = runner.train_dataloader\n        if epoch == self.switch_epoch:\n            runner.logger.info('Switch pipeline now!')\n            # The dataset pipeline cannot be updated when persistent_workers\n            # is True, so we need to force the dataloader's multi-process\n            # restart. This is a very hacky approach.\n            train_loader.dataset.pipeline = Compose(self.switch_pipeline)\n            if hasattr(train_loader, 'persistent_workers'\n                       ) and train_loader.persistent_workers is True:\n                train_loader._DataLoader__initialized = False\n                train_loader._iterator = None\n                self._restart_dataloader = True\n\n        else:\n            # Once the restart is complete, we need to restore\n            # the initialization flag.\n            if self._restart_dataloader:\n                train_loader._DataLoader__initialized = True\n"
  },
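# Illustrative sketch (assumption): enabling PipelineSwitchHook from a config
# so the training pipeline switches at a given epoch; the epoch number and the
# second-stage pipeline below are placeholders.
custom_hooks = [
    dict(
        type='PipelineSwitchHook',
        switch_epoch=280,
        switch_pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='RandomFlip', prob=0.5),
            dict(type='PackDetInputs')
        ])
]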
  {
    "path": "mmdet/engine/hooks/set_epoch_info_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.hooks import Hook\nfrom mmengine.model.wrappers import is_model_wrapper\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass SetEpochInfoHook(Hook):\n    \"\"\"Set runner's epoch information to the model.\"\"\"\n\n    def before_train_epoch(self, runner):\n        epoch = runner.epoch\n        model = runner.model\n        if is_model_wrapper(model):\n            model = model.module\n        model.set_epoch(epoch)\n"
  },
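A usage sketch; note the hook assumes the (unwrapped) model implements a `set_epoch(epoch)` method:

    # Illustrative: forward the current epoch to a model that defines set_epoch().
    custom_hooks = [dict(type='SetEpochInfoHook')]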
  {
    "path": "mmdet/engine/hooks/sync_norm_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections import OrderedDict\n\nfrom mmengine.dist import get_dist_info\nfrom mmengine.hooks import Hook\nfrom torch import nn\n\nfrom mmdet.registry import HOOKS\nfrom mmdet.utils import all_reduce_dict\n\n\ndef get_norm_states(module: nn.Module) -> OrderedDict:\n    \"\"\"Get the state_dict of batch norms in the module.\"\"\"\n    async_norm_states = OrderedDict()\n    for name, child in module.named_modules():\n        if isinstance(child, nn.modules.batchnorm._NormBase):\n            for k, v in child.state_dict().items():\n                async_norm_states['.'.join([name, k])] = v\n    return async_norm_states\n\n\n@HOOKS.register_module()\nclass SyncNormHook(Hook):\n    \"\"\"Synchronize Norm states before validation, currently used in YOLOX.\"\"\"\n\n    def before_val_epoch(self, runner):\n        \"\"\"Synchronizing norm.\"\"\"\n        module = runner.model\n        _, world_size = get_dist_info()\n        if world_size == 1:\n            return\n        norm_states = get_norm_states(module)\n        if len(norm_states) == 0:\n            return\n        # TODO: use `all_reduce_dict` in mmengine\n        norm_states = all_reduce_dict(norm_states, op='mean')\n        module.load_state_dict(norm_states, strict=False)\n"
  },
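A usage sketch for SyncNormHook (the hook takes no arguments and is a no-op for single-GPU runs or models without norm layers):

    # Illustrative: average BatchNorm buffers across ranks before validation.
    custom_hooks = [dict(type='SyncNormHook')]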
  {
    "path": "mmdet/engine/hooks/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\ndef trigger_visualization_hook(cfg, args):\n    default_hooks = cfg.default_hooks\n    if 'visualization' in default_hooks:\n        visualization_hook = default_hooks['visualization']\n        # Turn on visualization\n        visualization_hook['draw'] = True\n        if args.show:\n            visualization_hook['show'] = True\n            visualization_hook['wait_time'] = args.wait_time\n        if args.show_dir:\n            visualization_hook['test_out_dir'] = args.show_dir\n    else:\n        raise RuntimeError(\n            'VisualizationHook must be included in default_hooks.'\n            'refer to usage '\n            '\"visualization=dict(type=\\'VisualizationHook\\')\"')\n\n    return cfg\n"
  },
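A hypothetical call site for the helper above, e.g. from a test script whose CLI exposes --show, --show-dir and --wait-time; the argument values below are illustrative only:

    # `cfg` is a loaded mmengine Config whose default_hooks contains a
    # 'visualization' entry; `args` mimics the parsed CLI namespace.
    from argparse import Namespace
    args = Namespace(show=False, show_dir='vis_results', wait_time=2.0)
    cfg = trigger_visualization_hook(cfg, args)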
  {
    "path": "mmdet/engine/hooks/visualization_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport warnings\nfrom typing import Optional, Sequence\n\nimport mmcv\nfrom mmengine.fileio import FileClient\nfrom mmengine.hooks import Hook\nfrom mmengine.runner import Runner\nfrom mmengine.utils import mkdir_or_exist\nfrom mmengine.visualization import Visualizer\n\nfrom mmdet.registry import HOOKS\nfrom mmdet.structures import DetDataSample\n\n\n@HOOKS.register_module()\nclass DetVisualizationHook(Hook):\n    \"\"\"Detection Visualization Hook. Used to visualize validation and testing\n    process prediction results.\n\n    In the testing phase:\n\n    1. If ``show`` is True, it means that only the prediction results are\n        visualized without storing data, so ``vis_backends`` needs to\n        be excluded.\n    2. If ``test_out_dir`` is specified, it means that the prediction results\n        need to be saved to ``test_out_dir``. In order to avoid vis_backends\n        also storing data, so ``vis_backends`` needs to be excluded.\n    3. ``vis_backends`` takes effect if the user does not specify ``show``\n        and `test_out_dir``. You can set ``vis_backends`` to WandbVisBackend or\n        TensorboardVisBackend to store the prediction result in Wandb or\n        Tensorboard.\n\n    Args:\n        draw (bool): whether to draw prediction results. If it is False,\n            it means that no drawing will be done. Defaults to False.\n        interval (int): The interval of visualization. Defaults to 50.\n        score_thr (float): The threshold to visualize the bboxes\n            and masks. Defaults to 0.3.\n        show (bool): Whether to display the drawn image. Default to False.\n        wait_time (float): The interval of show (s). Defaults to 0.\n        test_out_dir (str, optional): directory where painted images\n            will be saved in testing process.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 draw: bool = False,\n                 interval: int = 50,\n                 score_thr: float = 0.3,\n                 show: bool = False,\n                 wait_time: float = 0.,\n                 test_out_dir: Optional[str] = None,\n                 file_client_args: dict = dict(backend='disk')):\n        self._visualizer: Visualizer = Visualizer.get_current_instance()\n        self.interval = interval\n        self.score_thr = score_thr\n        self.show = show\n        if self.show:\n            # No need to think about vis backends.\n            self._visualizer._vis_backends = {}\n            warnings.warn('The show is True, it means that only '\n                          'the prediction results are visualized '\n                          'without storing data, so vis_backends '\n                          'needs to be excluded.')\n\n        self.wait_time = wait_time\n        self.file_client_args = file_client_args.copy()\n        self.file_client = None\n        self.draw = draw\n        self.test_out_dir = test_out_dir\n        self._test_index = 0\n\n    def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict,\n                       outputs: Sequence[DetDataSample]) -> None:\n        \"\"\"Run after every ``self.interval`` validation iterations.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the validation process.\n            batch_idx 
(int): The index of the current batch in the val loop.\n            data_batch (dict): Data from dataloader.\n            outputs (Sequence[:obj:`DetDataSample`]]): A batch of data samples\n                that contain annotations and predictions.\n        \"\"\"\n        if self.draw is False:\n            return\n\n        if self.file_client is None:\n            self.file_client = FileClient(**self.file_client_args)\n\n        # There is no guarantee that the same batch of images\n        # is visualized for each evaluation.\n        total_curr_iter = runner.iter + batch_idx\n\n        # Visualize only the first data\n        img_path = outputs[0].img_path\n        img_bytes = self.file_client.get(img_path)\n        img = mmcv.imfrombytes(img_bytes, channel_order='rgb')\n\n        if total_curr_iter % self.interval == 0:\n            self._visualizer.add_datasample(\n                osp.basename(img_path) if self.show else 'val_img',\n                img,\n                data_sample=outputs[0],\n                show=self.show,\n                wait_time=self.wait_time,\n                pred_score_thr=self.score_thr,\n                step=total_curr_iter)\n\n    def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict,\n                        outputs: Sequence[DetDataSample]) -> None:\n        \"\"\"Run after every testing iterations.\n\n        Args:\n            runner (:obj:`Runner`): The runner of the testing process.\n            batch_idx (int): The index of the current batch in the val loop.\n            data_batch (dict): Data from dataloader.\n            outputs (Sequence[:obj:`DetDataSample`]): A batch of data samples\n                that contain annotations and predictions.\n        \"\"\"\n        if self.draw is False:\n            return\n\n        if self.test_out_dir is not None:\n            self.test_out_dir = osp.join(runner.work_dir, runner.timestamp,\n                                         self.test_out_dir)\n            mkdir_or_exist(self.test_out_dir)\n\n        if self.file_client is None:\n            self.file_client = FileClient(**self.file_client_args)\n\n        for data_sample in outputs:\n            self._test_index += 1\n\n            img_path = data_sample.img_path\n            img_bytes = self.file_client.get(img_path)\n            img = mmcv.imfrombytes(img_bytes, channel_order='rgb')\n\n            out_file = None\n            if self.test_out_dir is not None:\n                out_file = osp.basename(img_path)\n                out_file = osp.join(self.test_out_dir, out_file)\n\n            self._visualizer.add_datasample(\n                osp.basename(img_path) if self.show else 'test_img',\n                img,\n                data_sample=data_sample,\n                show=self.show,\n                wait_time=self.wait_time,\n                pred_score_thr=self.score_thr,\n                out_file=out_file,\n                step=self._test_index)\n"
  },
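An illustrative `default_hooks` entry for the testing phase of DetVisualizationHook (the directory name and threshold are placeholders):

    # Draw predictions while testing and save the painted images under
    # <work_dir>/<timestamp>/vis instead of pushing them to vis_backends.
    default_hooks = dict(
        visualization=dict(
            type='DetVisualizationHook',
            draw=True,
            test_out_dir='vis',
            score_thr=0.3))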
  {
    "path": "mmdet/engine/hooks/yolox_mode_switch_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Sequence\n\nfrom mmengine.hooks import Hook\nfrom mmengine.model import is_model_wrapper\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass YOLOXModeSwitchHook(Hook):\n    \"\"\"Switch the mode of YOLOX during training.\n\n    This hook turns off the mosaic and mixup data augmentation and switches\n    to use L1 loss in bbox_head.\n\n    Args:\n        num_last_epochs (int): The number of latter epochs in the end of the\n            training to close the data augmentation and switch to L1 loss.\n            Defaults to 15.\n       skip_type_keys (Sequence[str], optional): Sequence of type string to be\n            skip pipeline. Defaults to ('Mosaic', 'RandomAffine', 'MixUp').\n    \"\"\"\n\n    def __init__(\n        self,\n        num_last_epochs: int = 15,\n        skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')\n    ) -> None:\n        self.num_last_epochs = num_last_epochs\n        self.skip_type_keys = skip_type_keys\n        self._restart_dataloader = False\n\n    def before_train_epoch(self, runner) -> None:\n        \"\"\"Close mosaic and mixup augmentation and switches to use L1 loss.\"\"\"\n        epoch = runner.epoch\n        train_loader = runner.train_dataloader\n        model = runner.model\n        # TODO: refactor after mmengine using model wrapper\n        if is_model_wrapper(model):\n            model = model.module\n        if (epoch + 1) == runner.max_epochs - self.num_last_epochs:\n            runner.logger.info('No mosaic and mixup aug now!')\n            # The dataset pipeline cannot be updated when persistent_workers\n            # is True, so we need to force the dataloader's multi-process\n            # restart. This is a very hacky approach.\n            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)\n            if hasattr(train_loader, 'persistent_workers'\n                       ) and train_loader.persistent_workers is True:\n                train_loader._DataLoader__initialized = False\n                train_loader._iterator = None\n                self._restart_dataloader = True\n            runner.logger.info('Add additional L1 loss now!')\n            model.bbox_head.use_l1 = True\n        else:\n            # Once the restart is complete, we need to restore\n            # the initialization flag.\n            if self._restart_dataloader:\n                train_loader._DataLoader__initialized = True\n"
  },
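A usage sketch mirroring the defaults of YOLOXModeSwitchHook; the `priority` value is illustrative:

    # Hypothetical: turn off Mosaic/MixUp and enable the extra L1 loss for
    # the last 15 epochs of training.
    custom_hooks = [
        dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
    ]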
  {
    "path": "mmdet/engine/optimizers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .layer_decay_optimizer_constructor import \\\n    LearningRateDecayOptimizerConstructor\n\n__all__ = ['LearningRateDecayOptimizerConstructor']\n"
  },
  {
    "path": "mmdet/engine/optimizers/layer_decay_optimizer_constructor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport json\nfrom typing import List\n\nimport torch.nn as nn\nfrom mmengine.dist import get_dist_info\nfrom mmengine.logging import MMLogger\nfrom mmengine.optim import DefaultOptimWrapperConstructor\n\nfrom mmdet.registry import OPTIM_WRAPPER_CONSTRUCTORS\n\n\ndef get_layer_id_for_convnext(var_name, max_layer_id):\n    \"\"\"Get the layer id to set the different learning rates in ``layer_wise``\n    decay_type.\n\n    Args:\n        var_name (str): The key of the model.\n        max_layer_id (int): Maximum layer id.\n\n    Returns:\n        int: The id number corresponding to different learning rate in\n        ``LearningRateDecayOptimizerConstructor``.\n    \"\"\"\n\n    if var_name in ('backbone.cls_token', 'backbone.mask_token',\n                    'backbone.pos_embed'):\n        return 0\n    elif var_name.startswith('backbone.downsample_layers'):\n        stage_id = int(var_name.split('.')[2])\n        if stage_id == 0:\n            layer_id = 0\n        elif stage_id == 1:\n            layer_id = 2\n        elif stage_id == 2:\n            layer_id = 3\n        elif stage_id == 3:\n            layer_id = max_layer_id\n        return layer_id\n    elif var_name.startswith('backbone.stages'):\n        stage_id = int(var_name.split('.')[2])\n        block_id = int(var_name.split('.')[3])\n        if stage_id == 0:\n            layer_id = 1\n        elif stage_id == 1:\n            layer_id = 2\n        elif stage_id == 2:\n            layer_id = 3 + block_id // 3\n        elif stage_id == 3:\n            layer_id = max_layer_id\n        return layer_id\n    else:\n        return max_layer_id + 1\n\n\ndef get_stage_id_for_convnext(var_name, max_stage_id):\n    \"\"\"Get the stage id to set the different learning rates in ``stage_wise``\n    decay_type.\n\n    Args:\n        var_name (str): The key of the model.\n        max_stage_id (int): Maximum stage id.\n\n    Returns:\n        int: The id number corresponding to different learning rate in\n        ``LearningRateDecayOptimizerConstructor``.\n    \"\"\"\n\n    if var_name in ('backbone.cls_token', 'backbone.mask_token',\n                    'backbone.pos_embed'):\n        return 0\n    elif var_name.startswith('backbone.downsample_layers'):\n        return 0\n    elif var_name.startswith('backbone.stages'):\n        stage_id = int(var_name.split('.')[2])\n        return stage_id + 1\n    else:\n        return max_stage_id - 1\n\n\n@OPTIM_WRAPPER_CONSTRUCTORS.register_module()\nclass LearningRateDecayOptimizerConstructor(DefaultOptimWrapperConstructor):\n    # Different learning rates are set for different layers of backbone.\n    # Note: Currently, this optimizer constructor is built for ConvNeXt.\n\n    def add_params(self, params: List[dict], module: nn.Module,\n                   **kwargs) -> None:\n        \"\"\"Add all parameters of module to the params list.\n\n        The parameters of the given module will be added to the list of param\n        groups, with specific rules defined by paramwise_cfg.\n\n        Args:\n            params (list[dict]): A list of param groups, it will be modified\n                in place.\n            module (nn.Module): The module to be added.\n        \"\"\"\n        logger = MMLogger.get_current_instance()\n\n        parameter_groups = {}\n        logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')\n        num_layers = self.paramwise_cfg.get('num_layers') + 2\n        decay_rate = self.paramwise_cfg.get('decay_rate')\n   
     decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')\n        logger.info('Build LearningRateDecayOptimizerConstructor  '\n                    f'{decay_type} {decay_rate} - {num_layers}')\n        weight_decay = self.base_wd\n        for name, param in module.named_parameters():\n            if not param.requires_grad:\n                continue  # frozen weights\n            if len(param.shape) == 1 or name.endswith('.bias') or name in (\n                    'pos_embed', 'cls_token'):\n                group_name = 'no_decay'\n                this_weight_decay = 0.\n            else:\n                group_name = 'decay'\n                this_weight_decay = weight_decay\n            if 'layer_wise' in decay_type:\n                if 'ConvNeXt' in module.backbone.__class__.__name__:\n                    layer_id = get_layer_id_for_convnext(\n                        name, self.paramwise_cfg.get('num_layers'))\n                    logger.info(f'set param {name} as id {layer_id}')\n                else:\n                    raise NotImplementedError()\n            elif decay_type == 'stage_wise':\n                if 'ConvNeXt' in module.backbone.__class__.__name__:\n                    layer_id = get_stage_id_for_convnext(name, num_layers)\n                    logger.info(f'set param {name} as id {layer_id}')\n                else:\n                    raise NotImplementedError()\n            group_name = f'layer_{layer_id}_{group_name}'\n\n            if group_name not in parameter_groups:\n                scale = decay_rate**(num_layers - layer_id - 1)\n\n                parameter_groups[group_name] = {\n                    'weight_decay': this_weight_decay,\n                    'params': [],\n                    'param_names': [],\n                    'lr_scale': scale,\n                    'group_name': group_name,\n                    'lr': scale * self.base_lr,\n                }\n\n            parameter_groups[group_name]['params'].append(param)\n            parameter_groups[group_name]['param_names'].append(name)\n        rank, _ = get_dist_info()\n        if rank == 0:\n            to_display = {}\n            for key in parameter_groups:\n                to_display[key] = {\n                    'param_names': parameter_groups[key]['param_names'],\n                    'lr_scale': parameter_groups[key]['lr_scale'],\n                    'lr': parameter_groups[key]['lr'],\n                    'weight_decay': parameter_groups[key]['weight_decay'],\n                }\n            logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')\n        params.extend(parameter_groups.values())\n"
  },
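An illustrative `optim_wrapper` config for a ConvNeXt backbone using the constructor above (learning rate, decay rate and layer count are placeholders, not recommended values):

    # Parameters are grouped as layer_<id>_{decay,no_decay}; each group's lr
    # is base_lr * decay_rate ** (num_layers - layer_id - 1).
    optim_wrapper = dict(
        type='OptimWrapper',
        optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.05),
        constructor='LearningRateDecayOptimizerConstructor',
        paramwise_cfg=dict(
            decay_rate=0.7, decay_type='layer_wise', num_layers=12))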
  {
    "path": "mmdet/engine/runner/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .loops import TeacherStudentValLoop\n\n__all__ = ['TeacherStudentValLoop']\n"
  },
  {
    "path": "mmdet/engine/runner/loops.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nfrom mmengine.model import is_model_wrapper\nfrom mmengine.runner import ValLoop\n\nfrom mmdet.registry import LOOPS\n\n\n@LOOPS.register_module()\nclass TeacherStudentValLoop(ValLoop):\n    \"\"\"Loop for validation of model teacher and student.\"\"\"\n\n    def run(self):\n        \"\"\"Launch validation for model teacher and student.\"\"\"\n        self.runner.call_hook('before_val')\n        self.runner.call_hook('before_val_epoch')\n        self.runner.model.eval()\n\n        model = self.runner.model\n        if is_model_wrapper(model):\n            model = model.module\n        assert hasattr(model, 'teacher')\n        assert hasattr(model, 'student')\n\n        predict_on = model.semi_test_cfg.get('predict_on', None)\n        multi_metrics = dict()\n        for _predict_on in ['teacher', 'student']:\n            model.semi_test_cfg['predict_on'] = _predict_on\n            for idx, data_batch in enumerate(self.dataloader):\n                self.run_iter(idx, data_batch)\n            # compute metrics\n            metrics = self.evaluator.evaluate(len(self.dataloader.dataset))\n            multi_metrics.update(\n                {'/'.join((_predict_on, k)): v\n                 for k, v in metrics.items()})\n        model.semi_test_cfg['predict_on'] = predict_on\n\n        self.runner.call_hook('after_val_epoch', metrics=multi_metrics)\n        self.runner.call_hook('after_val')\n"
  },
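A usage sketch for a semi-supervised setup whose detector wraps `teacher` and `student` submodules; metrics are then reported under `teacher/...` and `student/...` keys:

    # Illustrative: evaluate both branches in every validation round.
    val_cfg = dict(type='TeacherStudentValLoop')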
  {
    "path": "mmdet/engine/schedulers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .quadratic_warmup import (QuadraticWarmupLR, QuadraticWarmupMomentum,\n                               QuadraticWarmupParamScheduler)\n\n__all__ = [\n    'QuadraticWarmupParamScheduler', 'QuadraticWarmupMomentum',\n    'QuadraticWarmupLR'\n]\n"
  },
  {
    "path": "mmdet/engine/schedulers/quadratic_warmup.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin\nfrom mmengine.optim.scheduler.momentum_scheduler import MomentumSchedulerMixin\nfrom mmengine.optim.scheduler.param_scheduler import INF, _ParamScheduler\nfrom torch.optim import Optimizer\n\nfrom mmdet.registry import PARAM_SCHEDULERS\n\n\n@PARAM_SCHEDULERS.register_module()\nclass QuadraticWarmupParamScheduler(_ParamScheduler):\n    r\"\"\"Warm up the parameter value of each parameter group by quadratic\n    formula:\n\n    .. math::\n\n        X_{t} = X_{t-1} + \\frac{2t+1}{{(end-begin)}^{2}} \\times X_{base}\n\n    Args:\n        optimizer (Optimizer): Wrapped optimizer.\n        param_name (str): Name of the parameter to be adjusted, such as\n            ``lr``, ``momentum``.\n        begin (int): Step at which to start updating the parameters.\n            Defaults to 0.\n        end (int): Step at which to stop updating the parameters.\n            Defaults to INF.\n        last_step (int): The index of last step. Used for resume without\n            state dict. Defaults to -1.\n        by_epoch (bool): Whether the scheduled parameters are updated by\n            epochs. Defaults to True.\n        verbose (bool): Whether to print the value for each update.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 optimizer: Optimizer,\n                 param_name: str,\n                 begin: int = 0,\n                 end: int = INF,\n                 last_step: int = -1,\n                 by_epoch: bool = True,\n                 verbose: bool = False):\n        if end >= INF:\n            raise ValueError('``end`` must be less than infinity,'\n                             'Please set ``end`` parameter of '\n                             '``QuadraticWarmupScheduler`` as the '\n                             'number of warmup end.')\n        self.total_iters = end - begin\n        super().__init__(\n            optimizer=optimizer,\n            param_name=param_name,\n            begin=begin,\n            end=end,\n            last_step=last_step,\n            by_epoch=by_epoch,\n            verbose=verbose)\n\n    @classmethod\n    def build_iter_from_epoch(cls,\n                              *args,\n                              begin=0,\n                              end=INF,\n                              by_epoch=True,\n                              epoch_length=None,\n                              **kwargs):\n        \"\"\"Build an iter-based instance of this scheduler from an epoch-based\n        config.\"\"\"\n        assert by_epoch, 'Only epoch-based kwargs whose `by_epoch=True` can ' \\\n                         'be converted to iter-based.'\n        assert epoch_length is not None and epoch_length > 0, \\\n            f'`epoch_length` must be a positive integer, ' \\\n            f'but got {epoch_length}.'\n        by_epoch = False\n        begin = begin * epoch_length\n        if end != INF:\n            end = end * epoch_length\n        return cls(*args, begin=begin, end=end, by_epoch=by_epoch, **kwargs)\n\n    def _get_value(self):\n        \"\"\"Compute value using chainable form of the scheduler.\"\"\"\n        if self.last_step == 0:\n            return [\n                base_value * (2 * self.last_step + 1) / self.total_iters**2\n                for base_value in self.base_values\n            ]\n\n        return [\n            group[self.param_name] + base_value *\n            (2 * self.last_step + 1) / 
self.total_iters**2\n            for base_value, group in zip(self.base_values,\n                                         self.optimizer.param_groups)\n        ]\n\n\n@PARAM_SCHEDULERS.register_module()\nclass QuadraticWarmupLR(LRSchedulerMixin, QuadraticWarmupParamScheduler):\n    \"\"\"Warm up the learning rate of each parameter group by quadratic formula.\n\n    Args:\n        optimizer (Optimizer): Wrapped optimizer.\n        begin (int): Step at which to start updating the parameters.\n            Defaults to 0.\n        end (int): Step at which to stop updating the parameters.\n            Defaults to INF.\n        last_step (int): The index of last step. Used for resume without\n            state dict. Defaults to -1.\n        by_epoch (bool): Whether the scheduled parameters are updated by\n            epochs. Defaults to True.\n        verbose (bool): Whether to print the value for each update.\n            Defaults to False.\n    \"\"\"\n\n\n@PARAM_SCHEDULERS.register_module()\nclass QuadraticWarmupMomentum(MomentumSchedulerMixin,\n                              QuadraticWarmupParamScheduler):\n    \"\"\"Warm up the momentum value of each parameter group by quadratic formula.\n\n    Args:\n        optimizer (Optimizer): Wrapped optimizer.\n        begin (int): Step at which to start updating the parameters.\n            Defaults to 0.\n        end (int): Step at which to stop updating the parameters.\n            Defaults to INF.\n        last_step (int): The index of last step. Used for resume without\n            state dict. Defaults to -1.\n        by_epoch (bool): Whether the scheduled parameters are updated by\n            epochs. Defaults to True.\n        verbose (bool): Whether to print the value for each update.\n            Defaults to False.\n    \"\"\"\n"
  },
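An illustrative scheduler entry for the warmup classes above: quadratic learning-rate warmup over the first few epochs, converted to iteration granularity (the epoch count is a placeholder):

    param_scheduler = [
        dict(
            type='QuadraticWarmupLR',
            by_epoch=True,
            begin=0,
            end=5,
            convert_to_iter_based=True),
        # ... followed by the main schedule (e.g. a cosine or step LR).
    ]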
  {
    "path": "mmdet/evaluation/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .functional import *  # noqa: F401,F403\nfrom .metrics import *  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/evaluation/functional/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bbox_overlaps import bbox_overlaps\nfrom .class_names import (cityscapes_classes, coco_classes,\n                          coco_panoptic_classes, dataset_aliases, get_classes,\n                          imagenet_det_classes, imagenet_vid_classes,\n                          objects365v1_classes, objects365v2_classes,\n                          oid_challenge_classes, oid_v6_classes, voc_classes)\nfrom .mean_ap import average_precision, eval_map, print_map_summary\nfrom .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,\n                             pq_compute_single_core)\nfrom .recall import (eval_recalls, plot_iou_recall, plot_num_recall,\n                     print_recall_summary)\n\n__all__ = [\n    'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',\n    'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',\n    'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',\n    'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',\n    'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',\n    'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',\n    'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes'\n]\n"
  },
  {
    "path": "mmdet/evaluation/functional/bbox_overlaps.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\n\ndef bbox_overlaps(bboxes1,\n                  bboxes2,\n                  mode='iou',\n                  eps=1e-6,\n                  use_legacy_coordinate=False):\n    \"\"\"Calculate the ious between each bbox of bboxes1 and bboxes2.\n\n    Args:\n        bboxes1 (ndarray): Shape (n, 4)\n        bboxes2 (ndarray): Shape (k, 4)\n        mode (str): IOU (intersection over union) or IOF (intersection\n            over foreground)\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Note when function is used in `VOCDataset`, it should be\n            True to align with the official implementation\n            `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar`\n            Default: False.\n\n    Returns:\n        ious (ndarray): Shape (n, k)\n    \"\"\"\n\n    assert mode in ['iou', 'iof']\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n    bboxes1 = bboxes1.astype(np.float32)\n    bboxes2 = bboxes2.astype(np.float32)\n    rows = bboxes1.shape[0]\n    cols = bboxes2.shape[0]\n    ious = np.zeros((rows, cols), dtype=np.float32)\n    if rows * cols == 0:\n        return ious\n    exchange = False\n    if bboxes1.shape[0] > bboxes2.shape[0]:\n        bboxes1, bboxes2 = bboxes2, bboxes1\n        ious = np.zeros((cols, rows), dtype=np.float32)\n        exchange = True\n    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * (\n        bboxes1[:, 3] - bboxes1[:, 1] + extra_length)\n    area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * (\n        bboxes2[:, 3] - bboxes2[:, 1] + extra_length)\n    for i in range(bboxes1.shape[0]):\n        x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n        y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n        x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n        y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n        overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum(\n            y_end - y_start + extra_length, 0)\n        if mode == 'iou':\n            union = area1[i] + area2 - overlap\n        else:\n            union = area1[i] if not exchange else area2\n        union = np.maximum(union, eps)\n        ious[i, :] = overlap / union\n    if exchange:\n        ious = ious.T\n    return ious\n"
  },
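A minimal numpy sketch of calling the function above (values chosen for easy mental checking):

    import numpy as np
    from mmdet.evaluation.functional import bbox_overlaps
    boxes_a = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    boxes_b = np.array([[0., 0., 10., 10.]])
    # IoU of the first pair is 1.0; the second pair overlaps by 25 / 175 ~= 0.143.
    print(bbox_overlaps(boxes_a, boxes_b, mode='iou'))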
  {
    "path": "mmdet/evaluation/functional/class_names.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.utils import is_str\n\n\ndef wider_face_classes() -> list:\n    \"\"\"Class names of WIDERFace.\"\"\"\n    return ['face']\n\n\ndef voc_classes() -> list:\n    \"\"\"Class names of PASCAL VOC.\"\"\"\n    return [\n        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',\n        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'\n    ]\n\n\ndef imagenet_det_classes() -> list:\n    \"\"\"Class names of ImageNet Det.\"\"\"\n    return [\n        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',\n        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',\n        'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',\n        'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',\n        'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',\n        'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',\n        'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',\n        'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',\n        'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',\n        'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',\n        'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',\n        'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',\n        'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',\n        'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',\n        'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',\n        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',\n        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',\n        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',\n        'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',\n        'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',\n        'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',\n        'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',\n        'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',\n        'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',\n        'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',\n        'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',\n        'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',\n        'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',\n        'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',\n        'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',\n        'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',\n        'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',\n        'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',\n        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',\n        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',\n        'whale', 'wine_bottle', 'zebra'\n    ]\n\n\ndef imagenet_vid_classes() -> list:\n    \"\"\"Class names of ImageNet VID.\"\"\"\n    return [\n        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',\n        'cattle', 'dog', 'domestic_cat', 
'elephant', 'fox', 'giant_panda',\n        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',\n        'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',\n        'watercraft', 'whale', 'zebra'\n    ]\n\n\ndef coco_classes() -> list:\n    \"\"\"Class names of COCO.\"\"\"\n    return [\n        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n        'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',\n        'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n        'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',\n        'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',\n        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n        'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',\n        'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',\n        'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',\n        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n        'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'\n    ]\n\n\ndef coco_panoptic_classes() -> list:\n    \"\"\"Class names of COCO panoptic.\"\"\"\n    return [\n        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',\n        'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',\n        'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',\n        'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',\n        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',\n        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n        'wall-wood', 'water-other', 'window-blind', 'window-other',\n        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',\n        'cabinet-merged', 'table-merged', 'floor-other-merged',\n        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',\n        'paper-merged', 'food-other-merged', 'building-other-merged',\n        'rock-merged', 'wall-other-merged', 'rug-merged'\n    ]\n\n\ndef cityscapes_classes() -> list:\n    \"\"\"Class names of Cityscapes.\"\"\"\n    return [\n        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n        'bicycle'\n    ]\n\n\ndef oid_challenge_classes() -> list:\n    \"\"\"Class names of Open Images Challenge.\"\"\"\n    return [\n        'Footwear', 'Jeans', 'House', 
'Tree', 'Woman', 'Man', 'Land vehicle',\n        'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl',\n        'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert',\n        'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee',\n        'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink',\n        'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table',\n        'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light',\n        'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum',\n        'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat',\n        'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt',\n        'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear',\n        'Vehicle registration plate', 'Microphone', 'Musical keyboard',\n        'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable',\n        'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries',\n        'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane',\n        'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail',\n        'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle',\n        'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat',\n        'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame',\n        'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet',\n        'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag',\n        'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree',\n        'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine',\n        'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance',\n        'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard',\n        'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf',\n        'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch',\n        'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster',\n        'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal',\n        'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer',\n        'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer',\n        'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace',\n        'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry',\n        'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot',\n        'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite',\n        'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper',\n        'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft',\n        'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter',\n        'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra',\n        'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard',\n        'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building',\n        'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll',\n        'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon',\n        'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock',\n        'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance',\n        'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair',\n        'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat',\n        'Ruler', 'Plastic bag', 'Penguin', 
'Watermelon', 'Harbor seal', 'Pen',\n        'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust',\n        'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot',\n        'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken',\n        'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod',\n        'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet',\n        'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture',\n        'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat',\n        'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep',\n        'Tablet computer', 'Pillow', 'Kitchen & dining room table',\n        'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree',\n        'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread',\n        'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope',\n        'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber',\n        'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies',\n        'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch',\n        'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags',\n        'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock',\n        'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza',\n        'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store',\n        'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry',\n        'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase',\n        'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft',\n        'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer',\n        'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon',\n        'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger',\n        'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball',\n        'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin',\n        'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle',\n        'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot',\n        'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle',\n        'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman',\n        'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper',\n        'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone',\n        'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear',\n        'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail',\n        'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn',\n        'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango',\n        'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell',\n        'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase',\n        'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup',\n        'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula',\n        'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon'\n    ]\n\n\ndef oid_v6_classes() -> list:\n    \"\"\"Class names of Open Images V6.\"\"\"\n    return [\n        'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football',\n        'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy',\n        'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye',\n        'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human 
beard',\n        'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber',\n        'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick',\n        'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle',\n        'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot',\n        'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy',\n        'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt',\n        'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear',\n        'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot',\n        'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee',\n        'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw',\n        'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern',\n        'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace',\n        'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer',\n        'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock',\n        'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft',\n        'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile',\n        'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel',\n        'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola',\n        'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building',\n        'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor',\n        'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment',\n        'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini',\n        'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur',\n        'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula',\n        'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser',\n        'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero',\n        'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener',\n        'Goggles', 'Human body', 'Roller skates', 'Coffee cup',\n        'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign',\n        'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker',\n        'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food',\n        'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove',\n        'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax',\n        'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart',\n        'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind',\n        'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light',\n        'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear',\n        'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle',\n        'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat',\n        'Baseball bat', 'Baseball glove', 'Mixing bowl',\n        'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House',\n        'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed',\n        'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer',\n        'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster',\n        'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw',\n        'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate',\n        'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove',\n        'Punching 
bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)',\n        'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet',\n        'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife',\n        'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse',\n        'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard',\n        'Billiard table', 'Mammal', 'Mouse', 'Motorcycle',\n        'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow',\n        'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk',\n        'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom',\n        'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device',\n        'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard',\n        'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball',\n        'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl',\n        'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta',\n        'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer',\n        'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile',\n        'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda',\n        'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood',\n        'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi',\n        'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine',\n        'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table',\n        'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco',\n        'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree',\n        'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray',\n        'Trousers', 'Bowling equipment', 'Football helmet', 'Truck',\n        'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag',\n        'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale',\n        'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion',\n        'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck',\n        'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper',\n        'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog',\n        'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer',\n        'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark',\n        'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser',\n        'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger',\n        'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus',\n        'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull',\n        'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench',\n        'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange',\n        'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet',\n        'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut',\n        'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera',\n        'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable',\n        'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish',\n        'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple',\n        'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower',\n        'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug',\n        'Shelf', 'Watch', 
'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow',\n        'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone',\n        'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray',\n        'Kitchen & dining room table', 'Dog bed', 'Cake stand',\n        'Cat furniture', 'Bathroom accessory', 'Facial tissue holder',\n        'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler',\n        'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry',\n        'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily',\n        'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant',\n        'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon',\n        'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich',\n        'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod',\n        'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume',\n        'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair',\n        'Rugby ball', 'Armadillo', 'Maracas', 'Helmet'\n    ]\n\n\ndef objects365v1_classes() -> list:\n    \"\"\"Class names of Objects365 V1.\"\"\"\n    return [\n        'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',\n        'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',\n        'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes',\n        'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv',\n        'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl',\n        'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can',\n        'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket',\n        'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv',\n        'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone',\n        'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle',\n        'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish',\n        'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle',\n        'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle',\n        'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus',\n        'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone',\n        'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon',\n        'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal',\n        'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane',\n        'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote',\n        'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato',\n        'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone',\n        'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine',\n        'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove',\n        'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat',\n        'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun',\n        'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck',\n        'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator',\n        'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',\n        'cutting/chopping board', 'tennis racket', 'candy',\n        'skating and skiing shoes', 'scissors', 'folder', 'baseball',\n        'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',\n        'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',\n     
   'american football', 'basketball', 'potato', 'paint brush', 'printer',\n        'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',\n        'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',\n        'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',\n        'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',\n        'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',\n        'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',\n        'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale',\n        'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',\n        'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',\n        'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',\n        'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',\n        'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',\n        'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',\n        'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',\n        'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',\n        'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',\n        'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',\n        'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope',\n        'shrimp', 'rickshaw', 'trombone', 'pomegranate', 'coconut',\n        'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly',\n        'egg tart', 'cheese', 'pig', 'pomelo', 'race car', 'rice cooker',\n        'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion',\n        'chips', 'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill',\n        'spring rolls', 'tortoise/turtle', 'parrot', 'flute', 'measuring cup',\n        'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish',\n        'noodles', 'yak', 'mop', 'crab', 'microscope', 'barbell', 'bread/bun',\n        'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal',\n        'mangosteen', 'comb', 'eraser', 'pitaya', 'scallop', 'pencil case',\n        'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle', 'monkey',\n        'durian', 'game board', 'rabbit', 'french horn', 'ambulance',\n        'asparagus', 'hoverboard', 'pasta', 'target', 'hotair balloon',\n        'chainsaw', 'lobster', 'iron', 'flashlight'\n    ]\n\n\ndef objects365v2_classes() -> list:\n    \"\"\"Class names of Objects365 V2.\"\"\"\n    return [\n        'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',\n        'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',\n        'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',\n        'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',\n        'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',\n        'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',\n        'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',\n        'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',\n        'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',\n        'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar',\n        'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',\n        'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy',\n        'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 
'Tent',\n        'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner',\n        'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork',\n        'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot',\n        'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',\n        'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',\n        'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle',\n        'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane',\n        'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',\n        'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone',\n        'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane',\n        'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',\n        'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza',\n        'Elephant', 'Skateboard', 'Surfboard', 'Gun',\n        'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',\n        'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',\n        'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',\n        'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',\n        'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',\n        'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',\n        'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin',\n        'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards',\n        'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase',\n        'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck',\n        'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket',\n        'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis',\n        'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion',\n        'Green beans', 'Projector', 'Frisbee',\n        'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',\n        'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',\n        'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',\n        'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',\n        'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',\n        'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',\n        'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',\n        'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',\n        'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',\n        'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',\n        'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',\n        'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',\n        'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',\n        'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',\n        'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',\n        'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',\n        'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba',\n        'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', 'Buttefly',\n        'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill',\n        'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill', 'Lighter',\n        'Grapefruit', 'Game board', 'Mop', 
'Radish', 'Baozi', 'Target',\n        'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak',\n        'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',\n        'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',\n        'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',\n        'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',\n        'Table Tennis '\n    ]\n\n\ndataset_aliases = {\n    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],\n    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],\n    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],\n    'coco': ['coco', 'mscoco', 'ms_coco'],\n    'coco_panoptic': ['coco_panoptic', 'panoptic'],\n    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],\n    'cityscapes': ['cityscapes'],\n    'oid_challenge': ['oid_challenge', 'openimages_challenge'],\n    'oid_v6': ['oid_v6', 'openimages_v6'],\n    'objects365v1': ['objects365v1', 'obj365v1'],\n    'objects365v2': ['objects365v2', 'obj365v2']\n}\n\n\ndef get_classes(dataset) -> list:\n    \"\"\"Get class names of a dataset.\"\"\"\n    alias2name = {}\n    for name, aliases in dataset_aliases.items():\n        for alias in aliases:\n            alias2name[alias] = name\n\n    if is_str(dataset):\n        if dataset in alias2name:\n            labels = eval(alias2name[dataset] + '_classes()')\n        else:\n            raise ValueError(f'Unrecognized dataset: {dataset}')\n    else:\n        raise TypeError(f'dataset must a str, but got {type(dataset)}')\n    return labels\n"
  },
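  {
    "path": "examples/class_names_demo.py",
    "content": "# Hypothetical usage sketch, not part of the upstream repository: the file\n# name and everything below are illustrative. It shows how an alias from\n# `dataset_aliases` in mmdet/evaluation/functional/class_names.py resolves\n# to a class-name list via get_classes().\nfrom mmdet.evaluation.functional.class_names import (get_classes,\n                                                     objects365v1_classes)\n\n# 'obj365v1' is listed as an alias of 'objects365v1' in dataset_aliases.\nnames = get_classes('obj365v1')\nassert names == objects365v1_classes()\nprint(len(names), names[:5])\n"
  },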
  {
    "path": "mmdet/evaluation/functional/mean_ap.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom mmengine.logging import print_log\nfrom mmengine.utils import is_str\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\nfrom .class_names import get_classes\n\n\ndef average_precision(recalls, precisions, mode='area'):\n    \"\"\"Calculate average precision (for single or multiple scales).\n\n    Args:\n        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )\n        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )\n        mode (str): 'area' or '11points', 'area' means calculating the area\n            under precision-recall curve, '11points' means calculating\n            the average precision of recalls at [0, 0.1, ..., 1]\n\n    Returns:\n        float or ndarray: calculated average precision\n    \"\"\"\n    no_scale = False\n    if recalls.ndim == 1:\n        no_scale = True\n        recalls = recalls[np.newaxis, :]\n        precisions = precisions[np.newaxis, :]\n    assert recalls.shape == precisions.shape and recalls.ndim == 2\n    num_scales = recalls.shape[0]\n    ap = np.zeros(num_scales, dtype=np.float32)\n    if mode == 'area':\n        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)\n        ones = np.ones((num_scales, 1), dtype=recalls.dtype)\n        mrec = np.hstack((zeros, recalls, ones))\n        mpre = np.hstack((zeros, precisions, zeros))\n        for i in range(mpre.shape[1] - 1, 0, -1):\n            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])\n        for i in range(num_scales):\n            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]\n            ap[i] = np.sum(\n                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n    elif mode == '11points':\n        for i in range(num_scales):\n            for thr in np.arange(0, 1 + 1e-3, 0.1):\n                precs = precisions[i, recalls[i, :] >= thr]\n                prec = precs.max() if precs.size > 0 else 0\n                ap[i] += prec\n        ap /= 11\n    else:\n        raise ValueError(\n            'Unrecognized mode, only \"area\" and \"11points\" are supported')\n    if no_scale:\n        ap = ap[0]\n    return ap\n\n\ndef tpfp_imagenet(det_bboxes,\n                  gt_bboxes,\n                  gt_bboxes_ignore=None,\n                  default_iou_thr=0.5,\n                  area_ranges=None,\n                  use_legacy_coordinate=False,\n                  **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Defaults to None\n        default_iou_thr (float): IoU threshold to be considered as matched for\n            medium and large bboxes (small ones have special rules).\n            Defaults to 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n            in the format [(min1, max1), (min2, max2), ...]. Defaults to None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Defaults to False.\n\n    Returns:\n        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. 
The shape of\n        each array is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp\n    # of a certain scale.\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp\n    ious = bbox_overlaps(\n        det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate)\n    gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length\n    gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length\n    iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),\n                          default_iou_thr)\n    # sort all detections by scores in descending order\n    sort_inds = np.argsort(-det_bboxes[:, -1])\n    for k, (min_area, max_area) in enumerate(area_ranges):\n        gt_covered = np.zeros(num_gts, dtype=bool)\n        # if no area range is specified, gt_area_ignore is all False\n        if min_area is None:\n            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n        else:\n            gt_areas = gt_w * gt_h\n            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n        for i in sort_inds:\n            max_iou = -1\n            matched_gt = -1\n            # find best overlapped available gt\n            for j in range(num_gts):\n                # different from PASCAL VOC: allow finding other gts if the\n                # best overlapped ones are already matched by other det bboxes\n                if gt_covered[j]:\n                    continue\n                elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:\n                    max_iou = ious[i, j]\n                    matched_gt = j\n            # there are 4 cases for a det bbox:\n            # 1. it matches a gt, tp = 1, fp = 0\n            # 2. it matches an ignored gt, tp = 0, fp = 0\n            # 3. it matches no gt and within area range, tp = 0, fp = 1\n            # 4. 
it matches no gt but is beyond area range, tp = 0, fp = 0\n            if matched_gt >= 0:\n                gt_covered[matched_gt] = 1\n                if not (gt_ignore_inds[matched_gt]\n                        or gt_area_ignore[matched_gt]):\n                    tp[k, i] = 1\n            elif min_area is None:\n                fp[k, i] = 1\n            else:\n                bbox = det_bboxes[i, :4]\n                area = (bbox[2] - bbox[0] + extra_length) * (\n                    bbox[3] - bbox[1] + extra_length)\n                if area >= min_area and area < max_area:\n                    fp[k, i] = 1\n    return tp, fp\n\n\ndef tpfp_default(det_bboxes,\n                 gt_bboxes,\n                 gt_bboxes_ignore=None,\n                 iou_thr=0.5,\n                 area_ranges=None,\n                 use_legacy_coordinate=False,\n                 **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Defaults to None\n        iou_thr (float): IoU threshold to be considered as matched.\n            Defaults to 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be\n            evaluated, in the format [(min1, max1), (min2, max2), ...].\n            Defaults to None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Defaults to False.\n\n    Returns:\n        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n        each array is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n    # a certain scale\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n    # if there is no gt bboxes in this image, then all det bboxes\n    # within area range are false positives\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] 
= 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp\n\n    ious = bbox_overlaps(\n        det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)\n    # for each det, the max iou with all gts\n    ious_max = ious.max(axis=1)\n    # for each det, which gt overlaps most with it\n    ious_argmax = ious.argmax(axis=1)\n    # sort all dets in descending order by scores\n    sort_inds = np.argsort(-det_bboxes[:, -1])\n    for k, (min_area, max_area) in enumerate(area_ranges):\n        gt_covered = np.zeros(num_gts, dtype=bool)\n        # if no area range is specified, gt_area_ignore is all False\n        if min_area is None:\n            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n        else:\n            gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (\n                gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)\n            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n        for i in sort_inds:\n            if ious_max[i] >= iou_thr:\n                matched_gt = ious_argmax[i]\n                if not (gt_ignore_inds[matched_gt]\n                        or gt_area_ignore[matched_gt]):\n                    if not gt_covered[matched_gt]:\n                        gt_covered[matched_gt] = True\n                        tp[k, i] = 1\n                    else:\n                        fp[k, i] = 1\n                # otherwise ignore this detected bbox, tp = 0, fp = 0\n            elif min_area is None:\n                fp[k, i] = 1\n            else:\n                bbox = det_bboxes[i, :4]\n                area = (bbox[2] - bbox[0] + extra_length) * (\n                    bbox[3] - bbox[1] + extra_length)\n                if area >= min_area and area < max_area:\n                    fp[k, i] = 1\n    return tp, fp\n\n\ndef tpfp_openimages(det_bboxes,\n                    gt_bboxes,\n                    gt_bboxes_ignore=None,\n                    iou_thr=0.5,\n                    area_ranges=None,\n                    use_legacy_coordinate=False,\n                    gt_bboxes_group_of=None,\n                    use_group_of=True,\n                    ioa_thr=0.5,\n                    **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Defaults to None\n        iou_thr (float): IoU threshold to be considered as matched.\n            Defaults to 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be\n            evaluated, in the format [(min1, max1), (min2, max2), ...].\n            Defaults to None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Defaults to False.\n        gt_bboxes_group_of (ndarray): GT group_of of this image, of shape\n            (k, 1). 
Defaults to None\n        use_group_of (bool): Whether to use group of when calculate TP and FP,\n            which only used in OpenImages evaluation. Defaults to True.\n        ioa_thr (float | None): IoA threshold to be considered as matched,\n            which only used in OpenImages evaluation. Defaults to 0.5.\n\n    Returns:\n        tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where\n        (tp, fp) whose elements are 0 and 1. The shape of each array is\n        (num_scales, m). (det_bboxes) whose will filter those are not\n        matched by group of gts when processing Open Images evaluation.\n        The shape is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n    # a certain scale\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n    # if there is no gt bboxes in this image, then all det bboxes\n    # within area range are false positives\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp, det_bboxes\n\n    if gt_bboxes_group_of is not None and use_group_of:\n        # if handle group-of boxes, divided gt boxes into two parts:\n        # non-group-of and group-of.Then calculate ious and ioas through\n        # non-group-of group-of gts respectively. 
This only used in\n        # OpenImages evaluation.\n        assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0]\n        non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of]\n        group_gt_bboxes = gt_bboxes[gt_bboxes_group_of]\n        num_gts_group = group_gt_bboxes.shape[0]\n        ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes)\n        ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof')\n    else:\n        # if not consider group-of boxes, only calculate ious through gt boxes\n        ious = bbox_overlaps(\n            det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)\n        ioas = None\n\n    if ious.shape[1] > 0:\n        # for each det, the max iou with all gts\n        ious_max = ious.max(axis=1)\n        # for each det, which gt overlaps most with it\n        ious_argmax = ious.argmax(axis=1)\n        # sort all dets in descending order by scores\n        sort_inds = np.argsort(-det_bboxes[:, -1])\n        for k, (min_area, max_area) in enumerate(area_ranges):\n            gt_covered = np.zeros(num_gts, dtype=bool)\n            # if no area range is specified, gt_area_ignore is all False\n            if min_area is None:\n                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n            else:\n                gt_areas = (\n                    gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (\n                        gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)\n                gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n            for i in sort_inds:\n                if ious_max[i] >= iou_thr:\n                    matched_gt = ious_argmax[i]\n                    if not (gt_ignore_inds[matched_gt]\n                            or gt_area_ignore[matched_gt]):\n                        if not gt_covered[matched_gt]:\n                            gt_covered[matched_gt] = True\n                            tp[k, i] = 1\n                        else:\n                            fp[k, i] = 1\n                    # otherwise ignore this detected bbox, tp = 0, fp = 0\n                elif min_area is None:\n                    fp[k, i] = 1\n                else:\n                    bbox = det_bboxes[i, :4]\n                    area = (bbox[2] - bbox[0] + extra_length) * (\n                        bbox[3] - bbox[1] + extra_length)\n                    if area >= min_area and area < max_area:\n                        fp[k, i] = 1\n    else:\n        # if there is no no-group-of gt bboxes in this image,\n        # then all det bboxes within area range are false positives.\n        # Only used in OpenImages evaluation.\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n\n    if ioas is None or ioas.shape[1] <= 0:\n        return tp, fp, det_bboxes\n    else:\n        # The evaluation of group-of TP and FP are done in two stages:\n        # 1. All detections are first matched to non group-of boxes; true\n        #    positives are determined.\n        # 2. 
Detections that are determined as false positives are matched\n        #    against group-of boxes and calculated group-of TP and FP.\n        # Only used in OpenImages evaluation.\n        det_bboxes_group = np.zeros(\n            (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float)\n        match_group_of = np.zeros((num_scales, num_dets), dtype=bool)\n        tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32)\n        ioas_max = ioas.max(axis=1)\n        # for each det, which gt overlaps most with it\n        ioas_argmax = ioas.argmax(axis=1)\n        # sort all dets in descending order by scores\n        sort_inds = np.argsort(-det_bboxes[:, -1])\n        for k, (min_area, max_area) in enumerate(area_ranges):\n            box_is_covered = tp[k]\n            # if no area range is specified, gt_area_ignore is all False\n            if min_area is None:\n                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n            else:\n                gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n                    gt_bboxes[:, 3] - gt_bboxes[:, 1])\n                gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n            for i in sort_inds:\n                matched_gt = ioas_argmax[i]\n                if not box_is_covered[i]:\n                    if ioas_max[i] >= ioa_thr:\n                        if not (gt_ignore_inds[matched_gt]\n                                or gt_area_ignore[matched_gt]):\n                            if not tp_group[k, matched_gt]:\n                                tp_group[k, matched_gt] = 1\n                                match_group_of[k, i] = True\n                            else:\n                                match_group_of[k, i] = True\n\n                            if det_bboxes_group[k, matched_gt, -1] < \\\n                                    det_bboxes[i, -1]:\n                                det_bboxes_group[k, matched_gt] = \\\n                                    det_bboxes[i]\n\n        fp_group = (tp_group <= 0).astype(float)\n        tps = []\n        fps = []\n        # concatenate tp, fp, and det-boxes which not matched group of\n        # gt boxes and tp_group, fp_group, and det_bboxes_group which\n        # matched group of boxes respectively.\n        for i in range(num_scales):\n            tps.append(\n                np.concatenate((tp[i][~match_group_of[i]], tp_group[i])))\n            fps.append(\n                np.concatenate((fp[i][~match_group_of[i]], fp_group[i])))\n            det_bboxes = np.concatenate(\n                (det_bboxes[~match_group_of[i]], det_bboxes_group[i]))\n\n        tp = np.vstack(tps)\n        fp = np.vstack(fps)\n        return tp, fp, det_bboxes\n\n\ndef get_cls_results(det_results, annotations, class_id):\n    \"\"\"Get det results and gt information of a certain class.\n\n    Args:\n        det_results (list[list]): Same as `eval_map()`.\n        annotations (list[dict]): Same as `eval_map()`.\n        class_id (int): ID of a specific class.\n\n    Returns:\n        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes\n    \"\"\"\n    cls_dets = [img_res[class_id] for img_res in det_results]\n    cls_gts = []\n    cls_gts_ignore = []\n    for ann in annotations:\n        gt_inds = ann['labels'] == class_id\n        cls_gts.append(ann['bboxes'][gt_inds, :])\n\n        if ann.get('labels_ignore', None) is not None:\n            ignore_inds = ann['labels_ignore'] == class_id\n            
cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])\n        else:\n            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))\n\n    return cls_dets, cls_gts, cls_gts_ignore\n\n\ndef get_cls_group_ofs(annotations, class_id):\n    \"\"\"Get `gt_group_of` of a certain class, which is used in Open Images.\n\n    Args:\n        annotations (list[dict]): Same as `eval_map()`.\n        class_id (int): ID of a specific class.\n\n    Returns:\n        list[np.ndarray]: `gt_group_of` of a certain class.\n    \"\"\"\n    gt_group_ofs = []\n    for ann in annotations:\n        gt_inds = ann['labels'] == class_id\n        if ann.get('gt_is_group_ofs', None) is not None:\n            gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds])\n        else:\n            gt_group_ofs.append(np.empty((0, 1), dtype=bool))\n\n    return gt_group_ofs\n\n\ndef eval_map(det_results,\n             annotations,\n             scale_ranges=None,\n             iou_thr=0.5,\n             ioa_thr=None,\n             dataset=None,\n             logger=None,\n             tpfp_fn=None,\n             nproc=4,\n             use_legacy_coordinate=False,\n             use_group_of=False,\n             eval_mode='area'):\n    \"\"\"Evaluate mAP of a dataset.\n\n    Args:\n        det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n            The outer list indicates images, and the inner list indicates\n            per-class detected bboxes.\n        annotations (list[dict]): Ground truth annotations where each item of\n            the list indicates an image. Keys of annotations are:\n\n            - `bboxes`: numpy array of shape (n, 4)\n            - `labels`: numpy array of shape (n, )\n            - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n            - `labels_ignore` (optional): numpy array of shape (k, )\n        scale_ranges (list[tuple] | None): Range of scales to be evaluated,\n            in the format [(min1, max1), (min2, max2), ...]. A range of\n            (32, 64) means the area range between (32**2, 64**2).\n            Defaults to None.\n        iou_thr (float): IoU threshold to be considered as matched.\n            Defaults to 0.5.\n        ioa_thr (float | None): IoA threshold to be considered as matched,\n            which only used in OpenImages evaluation. Defaults to None.\n        dataset (list[str] | str | None): Dataset name or dataset classes,\n            there are minor differences in metrics for different datasets, e.g.\n            \"voc\", \"imagenet_det\", etc. Defaults to None.\n        logger (logging.Logger | str | None): The way to print the mAP\n            summary. See `mmengine.logging.print_log()` for details.\n            Defaults to None.\n        tpfp_fn (callable | None): The function used to determine true/\n            false positives. If None, :func:`tpfp_default` is used as default\n            unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this\n            case). If it is given as a function, then this function is used\n            to evaluate tp & fp. Default None.\n        nproc (int): Processes used for computing TP and FP.\n            Defaults to 4.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. 
which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Defaults to False.\n        use_group_of (bool): Whether to use group of when calculate TP and FP,\n            which only used in OpenImages evaluation. Defaults to False.\n        eval_mode (str): 'area' or '11points', 'area' means calculating the\n            area under precision-recall curve, '11points' means calculating\n            the average precision of recalls at [0, 0.1, ..., 1],\n            PASCAL VOC2007 uses `11points` as default evaluate mode, while\n            others are 'area'. Defaults to 'area'.\n\n    Returns:\n        tuple: (mAP, [dict, dict, ...])\n    \"\"\"\n    assert len(det_results) == len(annotations)\n    assert eval_mode in ['area', '11points'], \\\n        f'Unrecognized {eval_mode} mode, only \"area\" and \"11points\" ' \\\n        'are supported'\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    num_imgs = len(det_results)\n    num_scales = len(scale_ranges) if scale_ranges is not None else 1\n    num_classes = len(det_results[0])  # positive class num\n    area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]\n                   if scale_ranges is not None else None)\n\n    # There is no need to use multi processes to process\n    # when num_imgs = 1 .\n    if num_imgs > 1:\n        assert nproc > 0, 'nproc must be at least one.'\n        nproc = min(nproc, num_imgs)\n        pool = Pool(nproc)\n\n    eval_results = []\n    for i in range(num_classes):\n        # get gt and det bboxes of this class\n        cls_dets, cls_gts, cls_gts_ignore = get_cls_results(\n            det_results, annotations, i)\n        # choose proper function according to datasets to compute tp and fp\n        if tpfp_fn is None:\n            if dataset in ['det', 'vid']:\n                tpfp_fn = tpfp_imagenet\n            elif dataset in ['oid_challenge', 'oid_v6'] \\\n                    or use_group_of is True:\n                tpfp_fn = tpfp_openimages\n            else:\n                tpfp_fn = tpfp_default\n        if not callable(tpfp_fn):\n            raise ValueError(\n                f'tpfp_fn has to be a function or None, but got {tpfp_fn}')\n\n        if num_imgs > 1:\n            # compute tp and fp for each image with multiple processes\n            args = []\n            if use_group_of:\n                # used in Open Images Dataset evaluation\n                gt_group_ofs = get_cls_group_ofs(annotations, i)\n                args.append(gt_group_ofs)\n                args.append([use_group_of for _ in range(num_imgs)])\n            if ioa_thr is not None:\n                args.append([ioa_thr for _ in range(num_imgs)])\n\n            tpfp = pool.starmap(\n                tpfp_fn,\n                zip(cls_dets, cls_gts, cls_gts_ignore,\n                    [iou_thr for _ in range(num_imgs)],\n                    [area_ranges for _ in range(num_imgs)],\n                    [use_legacy_coordinate for _ in range(num_imgs)], *args))\n        else:\n            tpfp = tpfp_fn(\n                cls_dets[0],\n                cls_gts[0],\n                cls_gts_ignore[0],\n                iou_thr,\n                area_ranges,\n                use_legacy_coordinate,\n                gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0]\n                                    if use_group_of else None),\n                use_group_of=use_group_of,\n                ioa_thr=ioa_thr)\n     
       tpfp = [tpfp]\n\n        if use_group_of:\n            tp, fp, cls_dets = tuple(zip(*tpfp))\n        else:\n            tp, fp = tuple(zip(*tpfp))\n        # calculate gt number of each scale\n        # ignored gts or gts beyond the specific scale are not counted\n        num_gts = np.zeros(num_scales, dtype=int)\n        for j, bbox in enumerate(cls_gts):\n            if area_ranges is None:\n                num_gts[0] += bbox.shape[0]\n            else:\n                gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * (\n                    bbox[:, 3] - bbox[:, 1] + extra_length)\n                for k, (min_area, max_area) in enumerate(area_ranges):\n                    num_gts[k] += np.sum((gt_areas >= min_area)\n                                         & (gt_areas < max_area))\n        # sort all det bboxes by score, also sort tp and fp\n        cls_dets = np.vstack(cls_dets)\n        num_dets = cls_dets.shape[0]\n        sort_inds = np.argsort(-cls_dets[:, -1])\n        tp = np.hstack(tp)[:, sort_inds]\n        fp = np.hstack(fp)[:, sort_inds]\n        # calculate recall and precision with tp and fp\n        tp = np.cumsum(tp, axis=1)\n        fp = np.cumsum(fp, axis=1)\n        eps = np.finfo(np.float32).eps\n        recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)\n        precisions = tp / np.maximum((tp + fp), eps)\n        # calculate AP\n        if scale_ranges is None:\n            recalls = recalls[0, :]\n            precisions = precisions[0, :]\n            num_gts = num_gts.item()\n        ap = average_precision(recalls, precisions, eval_mode)\n        eval_results.append({\n            'num_gts': num_gts,\n            'num_dets': num_dets,\n            'recall': recalls,\n            'precision': precisions,\n            'ap': ap\n        })\n\n    if num_imgs > 1:\n        pool.close()\n\n    if scale_ranges is not None:\n        # shape (num_classes, num_scales)\n        all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])\n        all_num_gts = np.vstack(\n            [cls_result['num_gts'] for cls_result in eval_results])\n        mean_ap = []\n        for i in range(num_scales):\n            if np.any(all_num_gts[:, i] > 0):\n                mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())\n            else:\n                mean_ap.append(0.0)\n    else:\n        aps = []\n        for cls_result in eval_results:\n            if cls_result['num_gts'] > 0:\n                aps.append(cls_result['ap'])\n        mean_ap = np.array(aps).mean().item() if aps else 0.0\n\n    print_map_summary(\n        mean_ap, eval_results, dataset, area_ranges, logger=logger)\n\n    return mean_ap, eval_results\n\n\ndef print_map_summary(mean_ap,\n                      results,\n                      dataset=None,\n                      scale_ranges=None,\n                      logger=None):\n    \"\"\"Print mAP and results of each class.\n\n    A table will be printed to show the gts/dets/recall/AP of each class and\n    the mAP.\n\n    Args:\n        mean_ap (float): Calculated from `eval_map()`.\n        results (list[dict]): Calculated from `eval_map()`.\n        dataset (list[str] | str | None): Dataset name or dataset classes.\n        scale_ranges (list[tuple] | None): Range of scales to be evaluated.\n        logger (logging.Logger | str | None): The way to print the mAP\n            summary. 
See `mmengine.logging.print_log()` for details.\n            Defaults to None.\n    \"\"\"\n\n    if logger == 'silent':\n        return\n\n    if isinstance(results[0]['ap'], np.ndarray):\n        num_scales = len(results[0]['ap'])\n    else:\n        num_scales = 1\n\n    if scale_ranges is not None:\n        assert len(scale_ranges) == num_scales\n\n    num_classes = len(results)\n\n    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)\n    aps = np.zeros((num_scales, num_classes), dtype=np.float32)\n    num_gts = np.zeros((num_scales, num_classes), dtype=int)\n    for i, cls_result in enumerate(results):\n        if cls_result['recall'].size > 0:\n            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]\n        aps[:, i] = cls_result['ap']\n        num_gts[:, i] = cls_result['num_gts']\n\n    if dataset is None:\n        label_names = [str(i) for i in range(num_classes)]\n    elif is_str(dataset):\n        label_names = get_classes(dataset)\n    else:\n        label_names = dataset\n\n    if not isinstance(mean_ap, list):\n        mean_ap = [mean_ap]\n\n    header = ['class', 'gts', 'dets', 'recall', 'ap']\n    for i in range(num_scales):\n        if scale_ranges is not None:\n            print_log(f'Scale range {scale_ranges[i]}', logger=logger)\n        table_data = [header]\n        for j in range(num_classes):\n            row_data = [\n                label_names[j], num_gts[i, j], results[j]['num_dets'],\n                f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'\n            ]\n            table_data.append(row_data)\n        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])\n        table = AsciiTable(table_data)\n        table.inner_footing_row_border = True\n        print_log('\\n' + table.table, logger=logger)\n"
  },
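  {
    "path": "examples/eval_map_demo.py",
    "content": "# Hypothetical usage sketch, not part of the upstream repository: the file\n# name and the synthetic boxes below are illustrative. It evaluates mAP for\n# a single image with one class using eval_map() from\n# mmdet/evaluation/functional/mean_ap.py; array shapes follow its docstring.\nimport numpy as np\n\nfrom mmdet.evaluation.functional.mean_ap import eval_map\n\n# det_results: list over images -> list over classes -> (m, 5) array of\n# [x1, y1, x2, y2, score]; here one image and one class.\ndet_results = [[\n    np.array([[10., 10., 50., 50., 0.9],\n              [60., 60., 90., 90., 0.3]], dtype=np.float32)\n]]\n# annotations: one dict per image with 'bboxes' (n, 4) and 'labels' (n, ).\nannotations = [\n    dict(\n        bboxes=np.array([[12., 12., 48., 48.]], dtype=np.float32),\n        labels=np.array([0], dtype=np.int64))\n]\n\nmean_ap, per_class_results = eval_map(det_results, annotations, iou_thr=0.5)\nprint(mean_ap)\n"
  },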
  {
    "path": "mmdet/evaluation/functional/panoptic_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# Copyright (c) 2018, Alexander Kirillov\n# This file supports `file_client` for `panopticapi`,\n# the source code is copied from `panopticapi`,\n# only the way to load the gt images is modified.\nimport multiprocessing\nimport os\n\nimport mmcv\nimport numpy as np\nfrom mmengine.fileio import FileClient\n\n# A custom value to distinguish instance ID and category ID; need to\n# be greater than the number of categories.\n# For a pixel in the panoptic result map:\n#   pan_id = ins_id * INSTANCE_OFFSET + cat_id\nINSTANCE_OFFSET = 1000\n\ntry:\n    from panopticapi.evaluation import OFFSET, VOID, PQStat\n    from panopticapi.utils import rgb2id\nexcept ImportError:\n    PQStat = None\n    rgb2id = None\n    VOID = 0\n    OFFSET = 256 * 256 * 256\n\n\ndef pq_compute_single_core(proc_id,\n                           annotation_set,\n                           gt_folder,\n                           pred_folder,\n                           categories,\n                           file_client=None,\n                           print_log=False):\n    \"\"\"The single core function to evaluate the metric of Panoptic\n    Segmentation.\n\n    Same as the function with the same name in `panopticapi`. Only the function\n    to load the images is changed to use the file client.\n\n    Args:\n        proc_id (int): The id of the mini process.\n        gt_folder (str): The path of the ground truth images.\n        pred_folder (str): The path of the prediction images.\n        categories (str): The categories of the dataset.\n        file_client (object): The file client of the dataset. If None,\n            the backend will be set to `disk`.\n        print_log (bool): Whether to print the log. Defaults to False.\n    \"\"\"\n    if PQStat is None:\n        raise RuntimeError(\n            'panopticapi is not installed, please install it by: '\n            'pip install git+https://github.com/cocodataset/'\n            'panopticapi.git.')\n\n    if file_client is None:\n        file_client_args = dict(backend='disk')\n        file_client = FileClient(**file_client_args)\n\n    pq_stat = PQStat()\n\n    idx = 0\n    for gt_ann, pred_ann in annotation_set:\n        if print_log and idx % 100 == 0:\n            print('Core: {}, {} from {} images processed'.format(\n                proc_id, idx, len(annotation_set)))\n        idx += 1\n        # The gt images can be on the local disk or `ceph`, so we use\n        # file_client here.\n        img_bytes = file_client.get(\n            os.path.join(gt_folder, gt_ann['file_name']))\n        pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')\n        pan_gt = rgb2id(pan_gt)\n\n        # The predictions can only be on the local dist now.\n        pan_pred = mmcv.imread(\n            os.path.join(pred_folder, pred_ann['file_name']),\n            flag='color',\n            channel_order='rgb')\n        pan_pred = rgb2id(pan_pred)\n\n        gt_segms = {el['id']: el for el in gt_ann['segments_info']}\n        pred_segms = {el['id']: el for el in pred_ann['segments_info']}\n\n        # predicted segments area calculation + prediction sanity checks\n        pred_labels_set = set(el['id'] for el in pred_ann['segments_info'])\n        labels, labels_cnt = np.unique(pan_pred, return_counts=True)\n        for label, label_cnt in zip(labels, labels_cnt):\n            if label not in pred_segms:\n                if label == VOID:\n                    continue\n                raise KeyError(\n   
                 'In the image with ID {} segment with ID {} is '\n                    'presented in PNG and not presented in JSON.'.format(\n                        gt_ann['image_id'], label))\n            pred_segms[label]['area'] = label_cnt\n            pred_labels_set.remove(label)\n            if pred_segms[label]['category_id'] not in categories:\n                raise KeyError(\n                    'In the image with ID {} segment with ID {} has '\n                    'unknown category_id {}.'.format(\n                        gt_ann['image_id'], label,\n                        pred_segms[label]['category_id']))\n        if len(pred_labels_set) != 0:\n            raise KeyError(\n                'In the image with ID {} the following segment IDs {} '\n                'are presented in JSON and not presented in PNG.'.format(\n                    gt_ann['image_id'], list(pred_labels_set)))\n\n        # confusion matrix calculation\n        pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(\n            np.uint64)\n        gt_pred_map = {}\n        labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)\n        for label, intersection in zip(labels, labels_cnt):\n            gt_id = label // OFFSET\n            pred_id = label % OFFSET\n            gt_pred_map[(gt_id, pred_id)] = intersection\n\n        # count all matched pairs\n        gt_matched = set()\n        pred_matched = set()\n        for label_tuple, intersection in gt_pred_map.items():\n            gt_label, pred_label = label_tuple\n            if gt_label not in gt_segms:\n                continue\n            if pred_label not in pred_segms:\n                continue\n            if gt_segms[gt_label]['iscrowd'] == 1:\n                continue\n            if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][\n                    'category_id']:\n                continue\n\n            union = pred_segms[pred_label]['area'] + gt_segms[gt_label][\n                'area'] - intersection - gt_pred_map.get((VOID, pred_label), 0)\n            iou = intersection / union\n            if iou > 0.5:\n                pq_stat[gt_segms[gt_label]['category_id']].tp += 1\n                pq_stat[gt_segms[gt_label]['category_id']].iou += iou\n                gt_matched.add(gt_label)\n                pred_matched.add(pred_label)\n\n        # count false positives\n        crowd_labels_dict = {}\n        for gt_label, gt_info in gt_segms.items():\n            if gt_label in gt_matched:\n                continue\n            # crowd segments are ignored\n            if gt_info['iscrowd'] == 1:\n                crowd_labels_dict[gt_info['category_id']] = gt_label\n                continue\n            pq_stat[gt_info['category_id']].fn += 1\n\n        # count false positives\n        for pred_label, pred_info in pred_segms.items():\n            if pred_label in pred_matched:\n                continue\n            # intersection of the segment with VOID\n            intersection = gt_pred_map.get((VOID, pred_label), 0)\n            # plus intersection with corresponding CROWD region if it exists\n            if pred_info['category_id'] in crowd_labels_dict:\n                intersection += gt_pred_map.get(\n                    (crowd_labels_dict[pred_info['category_id']], pred_label),\n                    0)\n            # predicted segment is ignored if more than half of\n            # the segment correspond to VOID and CROWD regions\n            if intersection / pred_info['area'] > 0.5:\n                
continue\n            pq_stat[pred_info['category_id']].fp += 1\n\n    if print_log:\n        print('Core: {}, all {} images processed'.format(\n            proc_id, len(annotation_set)))\n    return pq_stat\n\n\ndef pq_compute_multi_core(matched_annotations_list,\n                          gt_folder,\n                          pred_folder,\n                          categories,\n                          file_client=None,\n                          nproc=32):\n    \"\"\"Evaluate the metrics of Panoptic Segmentation with multithreading.\n\n    Same as the function with the same name in `panopticapi`.\n\n    Args:\n        matched_annotations_list (list): The matched annotation list. Each\n            element is a tuple of annotations of the same image with the\n            format (gt_anns, pred_anns).\n        gt_folder (str): The path of the ground truth images.\n        pred_folder (str): The path of the prediction images.\n        categories (str): The categories of the dataset.\n        file_client (object): The file client of the dataset. If None,\n            the backend will be set to `disk`.\n        nproc (int): Number of processes for panoptic quality computing.\n            Defaults to 32. When `nproc` exceeds the number of cpu cores,\n            the number of cpu cores is used.\n    \"\"\"\n    if PQStat is None:\n        raise RuntimeError(\n            'panopticapi is not installed, please install it by: '\n            'pip install git+https://github.com/cocodataset/'\n            'panopticapi.git.')\n\n    if file_client is None:\n        file_client_args = dict(backend='disk')\n        file_client = FileClient(**file_client_args)\n\n    cpu_num = min(nproc, multiprocessing.cpu_count())\n\n    annotations_split = np.array_split(matched_annotations_list, cpu_num)\n    print('Number of cores: {}, images per core: {}'.format(\n        cpu_num, len(annotations_split[0])))\n    workers = multiprocessing.Pool(processes=cpu_num)\n    processes = []\n    for proc_id, annotation_set in enumerate(annotations_split):\n        p = workers.apply_async(pq_compute_single_core,\n                                (proc_id, annotation_set, gt_folder,\n                                 pred_folder, categories, file_client))\n        processes.append(p)\n\n    # Close the process pool, otherwise it will lead to memory\n    # leaking problems.\n    workers.close()\n    workers.join()\n\n    pq_stat = PQStat()\n    for p in processes:\n        pq_stat += p.get()\n\n    return pq_stat\n"
  },
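  {
    "path": "examples/panoptic_id_demo.py",
    "content": "# Hypothetical sketch, not part of the upstream repository: the file name\n# and the ids below are illustrative. It demonstrates the encoding described\n# at the top of mmdet/evaluation/functional/panoptic_utils.py, where a\n# panoptic id packs an instance id and a category id via INSTANCE_OFFSET.\nfrom mmdet.evaluation.functional.panoptic_utils import INSTANCE_OFFSET\n\ncat_id, ins_id = 7, 3\n# pan_id = ins_id * INSTANCE_OFFSET + cat_id, so both ids can be recovered.\npan_id = ins_id * INSTANCE_OFFSET + cat_id\nassert pan_id % INSTANCE_OFFSET == cat_id\nassert pan_id // INSTANCE_OFFSET == ins_id\nprint(pan_id)\n"
  },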
  {
    "path": "mmdet/evaluation/functional/recall.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Sequence\n\nimport numpy as np\nfrom mmengine.logging import print_log\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\n\n\ndef _recalls(all_ious, proposal_nums, thrs):\n\n    img_num = all_ious.shape[0]\n    total_gt_num = sum([ious.shape[0] for ious in all_ious])\n\n    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)\n    for k, proposal_num in enumerate(proposal_nums):\n        tmp_ious = np.zeros(0)\n        for i in range(img_num):\n            ious = all_ious[i][:, :proposal_num].copy()\n            gt_ious = np.zeros((ious.shape[0]))\n            if ious.size == 0:\n                tmp_ious = np.hstack((tmp_ious, gt_ious))\n                continue\n            for j in range(ious.shape[0]):\n                gt_max_overlaps = ious.argmax(axis=1)\n                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]\n                gt_idx = max_ious.argmax()\n                gt_ious[j] = max_ious[gt_idx]\n                box_idx = gt_max_overlaps[gt_idx]\n                ious[gt_idx, :] = -1\n                ious[:, box_idx] = -1\n            tmp_ious = np.hstack((tmp_ious, gt_ious))\n        _ious[k, :] = tmp_ious\n\n    _ious = np.fliplr(np.sort(_ious, axis=1))\n    recalls = np.zeros((proposal_nums.size, thrs.size))\n    for i, thr in enumerate(thrs):\n        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)\n\n    return recalls\n\n\ndef set_recall_param(proposal_nums, iou_thrs):\n    \"\"\"Check proposal_nums and iou_thrs and set correct format.\"\"\"\n    if isinstance(proposal_nums, Sequence):\n        _proposal_nums = np.array(proposal_nums)\n    elif isinstance(proposal_nums, int):\n        _proposal_nums = np.array([proposal_nums])\n    else:\n        _proposal_nums = proposal_nums\n\n    if iou_thrs is None:\n        _iou_thrs = np.array([0.5])\n    elif isinstance(iou_thrs, Sequence):\n        _iou_thrs = np.array(iou_thrs)\n    elif isinstance(iou_thrs, float):\n        _iou_thrs = np.array([iou_thrs])\n    else:\n        _iou_thrs = iou_thrs\n\n    return _proposal_nums, _iou_thrs\n\n\ndef eval_recalls(gts,\n                 proposals,\n                 proposal_nums=None,\n                 iou_thrs=0.5,\n                 logger=None,\n                 use_legacy_coordinate=False):\n    \"\"\"Calculate recalls.\n\n    Args:\n        gts (list[ndarray]): a list of arrays of shape (n, 4)\n        proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)\n        proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.\n        iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.\n        logger (logging.Logger | str | None): The way to print the recall\n            summary. See `mmengine.logging.print_log()` for details.\n            Default: None.\n        use_legacy_coordinate (bool): Whether use coordinate system\n            in mmdet v1.x. \"1\" was added to both height and width\n            which means w, h should be\n            computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. 
Default: False.\n\n\n    Returns:\n        ndarray: recalls of different ious and proposal nums\n    \"\"\"\n\n    img_num = len(gts)\n    assert img_num == len(proposals)\n    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)\n    all_ious = []\n    for i in range(img_num):\n        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:\n            scores = proposals[i][:, 4]\n            sort_idx = np.argsort(scores)[::-1]\n            img_proposal = proposals[i][sort_idx, :]\n        else:\n            img_proposal = proposals[i]\n        prop_num = min(img_proposal.shape[0], proposal_nums[-1])\n        if gts[i] is None or gts[i].shape[0] == 0:\n            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)\n        else:\n            ious = bbox_overlaps(\n                gts[i],\n                img_proposal[:prop_num, :4],\n                use_legacy_coordinate=use_legacy_coordinate)\n        all_ious.append(ious)\n    all_ious = np.array(all_ious)\n    recalls = _recalls(all_ious, proposal_nums, iou_thrs)\n\n    print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)\n    return recalls\n\n\ndef print_recall_summary(recalls,\n                         proposal_nums,\n                         iou_thrs,\n                         row_idxs=None,\n                         col_idxs=None,\n                         logger=None):\n    \"\"\"Print recalls in a table.\n\n    Args:\n        recalls (ndarray): calculated from `bbox_recalls`\n        proposal_nums (ndarray or list): top N proposals\n        iou_thrs (ndarray or list): iou thresholds\n        row_idxs (ndarray): which rows(proposal nums) to print\n        col_idxs (ndarray): which cols(iou thresholds) to print\n        logger (logging.Logger | str | None): The way to print the recall\n            summary. 
See `mmengine.logging.print_log()` for details.\n            Default: None.\n    \"\"\"\n    proposal_nums = np.array(proposal_nums, dtype=np.int32)\n    iou_thrs = np.array(iou_thrs)\n    if row_idxs is None:\n        row_idxs = np.arange(proposal_nums.size)\n    if col_idxs is None:\n        col_idxs = np.arange(iou_thrs.size)\n    row_header = [''] + iou_thrs[col_idxs].tolist()\n    table_data = [row_header]\n    for i, num in enumerate(proposal_nums[row_idxs]):\n        row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]\n        row.insert(0, num)\n        table_data.append(row)\n    table = AsciiTable(table_data)\n    print_log('\\n' + table.table, logger=logger)\n\n\ndef plot_num_recall(recalls, proposal_nums):\n    \"\"\"Plot Proposal_num-Recalls curve.\n\n    Args:\n        recalls(ndarray or list): shape (k,)\n        proposal_nums(ndarray or list): same shape as `recalls`\n    \"\"\"\n    if isinstance(proposal_nums, np.ndarray):\n        _proposal_nums = proposal_nums.tolist()\n    else:\n        _proposal_nums = proposal_nums\n    if isinstance(recalls, np.ndarray):\n        _recalls = recalls.tolist()\n    else:\n        _recalls = recalls\n\n    import matplotlib.pyplot as plt\n    f = plt.figure()\n    plt.plot([0] + _proposal_nums, [0] + _recalls)\n    plt.xlabel('Proposal num')\n    plt.ylabel('Recall')\n    plt.axis([0, proposal_nums.max(), 0, 1])\n    f.show()\n\n\ndef plot_iou_recall(recalls, iou_thrs):\n    \"\"\"Plot IoU-Recalls curve.\n\n    Args:\n        recalls(ndarray or list): shape (k,)\n        iou_thrs(ndarray or list): same shape as `recalls`\n    \"\"\"\n    if isinstance(iou_thrs, np.ndarray):\n        _iou_thrs = iou_thrs.tolist()\n    else:\n        _iou_thrs = iou_thrs\n    if isinstance(recalls, np.ndarray):\n        _recalls = recalls.tolist()\n    else:\n        _recalls = recalls\n\n    import matplotlib.pyplot as plt\n    f = plt.figure()\n    plt.plot(_iou_thrs + [1.0], _recalls + [0.])\n    plt.xlabel('IoU')\n    plt.ylabel('Recall')\n    plt.axis([iou_thrs.min(), 1, 0, 1])\n    f.show()\n"
  },
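  {
    "path": "examples/eval_recalls_demo.py",
    "content": "# Hypothetical usage sketch, not part of the upstream repository: the file\n# name and the synthetic boxes below are illustrative. It computes proposal\n# recall for one image with eval_recalls() from\n# mmdet/evaluation/functional/recall.py.\nimport numpy as np\n\nfrom mmdet.evaluation.functional.recall import eval_recalls\n\n# one image: ground-truth boxes (n, 4) and scored proposals (k, 5).\ngts = [np.array([[10., 10., 50., 50.]], dtype=np.float32)]\nproposals = [\n    np.array([[12., 12., 48., 48., 0.9],\n              [0., 0., 5., 5., 0.1]], dtype=np.float32)\n]\n\n# rows correspond to proposal numbers, columns to IoU thresholds.\nrecalls = eval_recalls(\n    gts, proposals, proposal_nums=[1, 2], iou_thrs=[0.5, 0.75])\nprint(recalls)\n"
  },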
  {
    "path": "mmdet/evaluation/metrics/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .cityscapes_metric import CityScapesMetric\nfrom .coco_metric import CocoMetric\nfrom .coco_occluded_metric import CocoOccludedSeparatedMetric\nfrom .coco_panoptic_metric import CocoPanopticMetric\nfrom .crowdhuman_metric import CrowdHumanMetric\nfrom .dump_det_results import DumpDetResults\nfrom .dump_proposals_metric import DumpProposals\nfrom .lvis_metric import LVISMetric\nfrom .openimages_metric import OpenImagesMetric\nfrom .voc_metric import VOCMetric\n\n__all__ = [\n    'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',\n    'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',\n    'CocoOccludedSeparatedMetric', 'DumpDetResults'\n]\n"
  },
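The metric classes exported above can be imported directly, but in practice they are usually referenced by class name in an evaluator config; a hedged sketch of both styles (the annotation path is a placeholder):

# Illustrative only; the ann_file path is a placeholder.
from mmdet.evaluation.metrics import CocoMetric, VOCMetric  # direct import

# config style: the class is referenced by its registered name and
# instantiated by the runner through the METRICS registry
val_evaluator = dict(
    type='CocoMetric',
    ann_file='data/coco/annotations/instances_val2017.json',
    metric='bbox')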
  {
    "path": "mmdet/evaluation/metrics/cityscapes_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport os.path as osp\nimport shutil\nfrom collections import OrderedDict\nfrom typing import Dict, Optional, Sequence\n\nimport mmcv\nimport numpy as np\nfrom mmengine.dist import is_main_process, master_only\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.logging import MMLogger\n\nfrom mmdet.registry import METRICS\n\ntry:\n    import cityscapesscripts\n    from cityscapesscripts.evaluation import \\\n        evalInstanceLevelSemanticLabeling as CSEval\n    from cityscapesscripts.helpers import labels as CSLabels\nexcept ImportError:\n    cityscapesscripts = None\n    CSLabels = None\n    CSEval = None\n\n\n@METRICS.register_module()\nclass CityScapesMetric(BaseMetric):\n    \"\"\"CityScapes metric for instance segmentation.\n\n    Args:\n        outfile_prefix (str): The prefix of txt and png files. The txt and\n            png file will be save in a directory whose path is\n            \"outfile_prefix.results/\".\n        seg_prefix (str, optional): Path to the directory which contains the\n            cityscapes instance segmentation masks. It's necessary when\n            training and validation. It could be None when infer on test\n            dataset. Defaults to None.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        keep_results (bool): Whether to keep the results. When ``format_only``\n            is True, ``keep_results`` must be True. Defaults to False.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. 
Defaults to None.\n    \"\"\"\n    default_prefix: Optional[str] = 'cityscapes'\n\n    def __init__(self,\n                 outfile_prefix: str,\n                 seg_prefix: Optional[str] = None,\n                 format_only: bool = False,\n                 keep_results: bool = False,\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        if cityscapesscripts is None:\n            raise RuntimeError('Please run \"pip install cityscapesscripts\" to '\n                               'install cityscapesscripts first.')\n\n        assert outfile_prefix, 'outfile_prefix must be not None.'\n\n        if format_only:\n            assert keep_results, 'keep_results must be True when '\n            'format_only is True'\n\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        self.format_only = format_only\n        self.keep_results = keep_results\n        self.seg_out_dir = osp.abspath(f'{outfile_prefix}.results')\n        self.seg_prefix = seg_prefix\n\n        if is_main_process():\n            os.makedirs(self.seg_out_dir, exist_ok=True)\n\n    @master_only\n    def __del__(self) -> None:\n        \"\"\"Clean up.\"\"\"\n        if not self.keep_results:\n            shutil.rmtree(self.seg_out_dir)\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            # parse pred\n            result = dict()\n            pred = data_sample['pred_instances']\n            filename = data_sample['img_path']\n            basename = osp.splitext(osp.basename(filename))[0]\n            pred_txt = osp.join(self.seg_out_dir, basename + '_pred.txt')\n            result['pred_txt'] = pred_txt\n            labels = pred['labels'].cpu().numpy()\n            masks = pred['masks'].cpu().numpy().astype(np.uint8)\n            if 'mask_scores' in pred:\n                # some detectors use different scores for bbox and mask\n                mask_scores = pred['mask_scores'].cpu().numpy()\n            else:\n                mask_scores = pred['scores'].cpu().numpy()\n\n            with open(pred_txt, 'w') as f:\n                for i, (label, mask, mask_score) in enumerate(\n                        zip(labels, masks, mask_scores)):\n                    class_name = self.dataset_meta['classes'][label]\n                    class_id = CSLabels.name2label[class_name].id\n                    png_filename = osp.join(\n                        self.seg_out_dir, basename + f'_{i}_{class_name}.png')\n                    mmcv.imwrite(mask, png_filename)\n                    f.write(f'{osp.basename(png_filename)} '\n                            f'{class_id} {mask_score}\\n')\n\n            # parse gt\n            gt = dict()\n            img_path = filename.replace('leftImg8bit.png',\n                                        'gtFine_instanceIds.png')\n            img_path = img_path.replace('leftImg8bit', 'gtFine')\n            
gt['file_name'] = osp.join(self.seg_prefix, img_path)\n\n            self.results.append((gt, result))\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            Dict[str, float]: The computed metrics. The keys are the names of\n                the metrics, and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        if self.format_only:\n            logger.info(\n                f'results are saved to {osp.dirname(self.seg_out_dir)}')\n            return OrderedDict()\n        logger.info('starts to compute metric')\n\n        gts, preds = zip(*results)\n        # set global states in cityscapes evaluation API\n        CSEval.args.cityscapesPath = osp.join(self.seg_prefix, '../..')\n        CSEval.args.predictionPath = self.seg_out_dir\n        CSEval.args.predictionWalk = None\n        CSEval.args.JSONOutput = False\n        CSEval.args.colorized = False\n        CSEval.args.gtInstancesFile = osp.join(self.seg_out_dir,\n                                               'gtInstances.json')\n\n        groundTruthImgList = [gt['file_name'] for gt in gts]\n        predictionImgList = [pred['pred_txt'] for pred in preds]\n        CSEval_results = CSEval.evaluateImgLists(predictionImgList,\n                                                 groundTruthImgList,\n                                                 CSEval.args)['averages']\n        eval_results = OrderedDict()\n        eval_results['mAP'] = CSEval_results['allAp']\n        eval_results['AP@50'] = CSEval_results['allAp50%']\n\n        return eval_results\n"
  },
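A hedged configuration sketch for the metric above, mirroring its constructor arguments; the directory paths are placeholders and must point at a prepared Cityscapes layout.

# Illustrative evaluator config; paths are placeholders.
val_evaluator = dict(
    type='CityScapesMetric',
    outfile_prefix='./work_dirs/cityscapes_metric/results',
    seg_prefix='data/cityscapes/gtFine/val',
    # set format_only=True (and keep_results=True) to only dump submission
    # files without running the cityscapesscripts evaluation
    format_only=False,
    keep_results=False)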
  {
    "path": "mmdet/evaluation/metrics/coco_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport datetime\nimport itertools\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.fileio import FileClient, dump, load\nfrom mmengine.logging import MMLogger\nfrom terminaltables import AsciiTable\n\nfrom mmdet.datasets.api_wrappers import COCO, COCOeval\nfrom mmdet.registry import METRICS\nfrom mmdet.structures.mask import encode_mask_results\nfrom ..functional import eval_recalls\n\n\n@METRICS.register_module()\nclass CocoMetric(BaseMetric):\n    \"\"\"COCO evaluation metric.\n\n    Evaluate AR, AP, and mAP for detection tasks including proposal/box\n    detection and instance segmentation. Please refer to\n    https://cocodataset.org/#detection-eval for more details.\n\n    Args:\n        ann_file (str, optional): Path to the coco format annotation file.\n            If not specified, ground truth annotations from the dataset will\n            be converted to coco format. Defaults to None.\n        metric (str | List[str]): Metrics to be evaluated. Valid metrics\n            include 'bbox', 'segm', 'proposal', and 'proposal_fast'.\n            Defaults to 'bbox'.\n        classwise (bool): Whether to evaluate the metric class-wise.\n            Defaults to False.\n        proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.\n            Defaults to (100, 300, 1000).\n        iou_thrs (float | List[float], optional): IoU threshold to compute AP\n            and AR. If not specified, IoUs from 0.5 to 0.95 will be used.\n            Defaults to None.\n        metric_items (List[str], optional): Metric result names to be\n            recorded in the evaluation result. Defaults to None.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        outfile_prefix (str, optional): The prefix of json files. It includes\n            the file path and the prefix of filename, e.g., \"a/b/prefix\".\n            If not specified, a temp file will be created. Defaults to None.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. Defaults to None.\n        sort_categories (bool): Whether sort categories in annotations. Only\n            used for `Objects365V1Dataset`. 
Defaults to False.\n    \"\"\"\n    default_prefix: Optional[str] = 'coco'\n\n    def __init__(self,\n                 ann_file: Optional[str] = None,\n                 metric: Union[str, List[str]] = 'bbox',\n                 classwise: bool = False,\n                 proposal_nums: Sequence[int] = (100, 300, 1000),\n                 iou_thrs: Optional[Union[float, Sequence[float]]] = None,\n                 metric_items: Optional[Sequence[str]] = None,\n                 format_only: bool = False,\n                 outfile_prefix: Optional[str] = None,\n                 file_client_args: dict = dict(backend='disk'),\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None,\n                 sort_categories: bool = False) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        # coco evaluation metrics\n        self.metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n        for metric in self.metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(\n                    \"metric should be one of 'bbox', 'segm', 'proposal', \"\n                    f\"'proposal_fast', but got {metric}.\")\n\n        # do class wise evaluation, default False\n        self.classwise = classwise\n\n        # proposal_nums used to compute recall or precision.\n        self.proposal_nums = list(proposal_nums)\n\n        # iou_thrs used to compute recall or precision.\n        if iou_thrs is None:\n            iou_thrs = np.linspace(\n                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n        self.iou_thrs = iou_thrs\n        self.metric_items = metric_items\n        self.format_only = format_only\n        if self.format_only:\n            assert outfile_prefix is not None, 'outfile_prefix must be not'\n            'None when format_only is True, otherwise the result files will'\n            'be saved to a temp directory which will be cleaned up at the end.'\n\n        self.outfile_prefix = outfile_prefix\n\n        self.file_client_args = file_client_args\n        self.file_client = FileClient(**file_client_args)\n\n        # if ann_file is not specified,\n        # initialize coco api with the converted dataset\n        if ann_file is not None:\n            with self.file_client.get_local_path(ann_file) as local_path:\n                self._coco_api = COCO(local_path)\n                if sort_categories:\n                    # 'categories' list in objects365_train.json and\n                    # objects365_val.json is inconsistent, need sort\n                    # list(or dict) before get cat_ids.\n                    cats = self._coco_api.cats\n                    sorted_cats = {i: cats[i] for i in sorted(cats)}\n                    self._coco_api.cats = sorted_cats\n                    categories = self._coco_api.dataset['categories']\n                    sorted_categories = sorted(\n                        categories, key=lambda i: i['id'])\n                    self._coco_api.dataset['categories'] = sorted_categories\n        else:\n            self._coco_api = None\n\n        # handle dataset lazy init\n        self.cat_ids = None\n        self.img_ids = None\n\n    def fast_eval_recall(self,\n                         results: List[dict],\n                         proposal_nums: Sequence[int],\n                         iou_thrs: Sequence[float],\n                         logger: Optional[MMLogger] = 
None) -> np.ndarray:\n        \"\"\"Evaluate proposal recall with COCO's fast_eval_recall.\n\n        Args:\n            results (List[dict]): Results of the dataset.\n            proposal_nums (Sequence[int]): Proposal numbers used for\n                evaluation.\n            iou_thrs (Sequence[float]): IoU thresholds used for evaluation.\n            logger (MMLogger, optional): Logger used for logging the recall\n                summary.\n        Returns:\n            np.ndarray: Averaged recall results.\n        \"\"\"\n        gt_bboxes = []\n        pred_bboxes = [result['bboxes'] for result in results]\n        for i in range(len(self.img_ids)):\n            ann_ids = self._coco_api.get_ann_ids(img_ids=self.img_ids[i])\n            ann_info = self._coco_api.load_anns(ann_ids)\n            if len(ann_info) == 0:\n                gt_bboxes.append(np.zeros((0, 4)))\n                continue\n            bboxes = []\n            for ann in ann_info:\n                if ann.get('ignore', False) or ann['iscrowd']:\n                    continue\n                x1, y1, w, h = ann['bbox']\n                bboxes.append([x1, y1, x1 + w, y1 + h])\n            bboxes = np.array(bboxes, dtype=np.float32)\n            if bboxes.shape[0] == 0:\n                bboxes = np.zeros((0, 4))\n            gt_bboxes.append(bboxes)\n\n        recalls = eval_recalls(\n            gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)\n        ar = recalls.mean(axis=1)\n        return ar\n\n    def xyxy2xywh(self, bbox: np.ndarray) -> list:\n        \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n        evaluation.\n\n        Args:\n            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n                ``xyxy`` order.\n\n        Returns:\n            list[float]: The converted bounding boxes, in ``xywh`` order.\n        \"\"\"\n\n        _bbox: List = bbox.tolist()\n        return [\n            _bbox[0],\n            _bbox[1],\n            _bbox[2] - _bbox[0],\n            _bbox[3] - _bbox[1],\n        ]\n\n    def results2json(self, results: Sequence[dict],\n                     outfile_prefix: str) -> dict:\n        \"\"\"Dump the detection results to a COCO style json file.\n\n        There are 3 types of results: proposals, bbox predictions, mask\n        predictions, and they have different data types. This method will\n        automatically recognize the type, and dump them to json files.\n\n        Args:\n            results (Sequence[dict]): Testing results of the\n                dataset.\n            outfile_prefix (str): The filename prefix of the json files. 
If the\n                prefix is \"somepath/xxx\", the json files will be named\n                \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n                \"somepath/xxx.proposal.json\".\n\n        Returns:\n            dict: Possible keys are \"bbox\", \"segm\", \"proposal\", and\n            values are corresponding filenames.\n        \"\"\"\n        bbox_json_results = []\n        segm_json_results = [] if 'masks' in results[0] else None\n        for idx, result in enumerate(results):\n            image_id = result.get('img_id', idx)\n            labels = result['labels']\n            bboxes = result['bboxes']\n            scores = result['scores']\n            # bbox results\n            for i, label in enumerate(labels):\n                data = dict()\n                data['image_id'] = image_id\n                data['bbox'] = self.xyxy2xywh(bboxes[i])\n                data['score'] = float(scores[i])\n                data['category_id'] = self.cat_ids[label]\n                bbox_json_results.append(data)\n\n            if segm_json_results is None:\n                continue\n\n            # segm results\n            masks = result['masks']\n            mask_scores = result.get('mask_scores', scores)\n            for i, label in enumerate(labels):\n                data = dict()\n                data['image_id'] = image_id\n                data['bbox'] = self.xyxy2xywh(bboxes[i])\n                data['score'] = float(mask_scores[i])\n                data['category_id'] = self.cat_ids[label]\n                if isinstance(masks[i]['counts'], bytes):\n                    masks[i]['counts'] = masks[i]['counts'].decode()\n                data['segmentation'] = masks[i]\n                segm_json_results.append(data)\n\n        result_files = dict()\n        result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n        result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n        dump(bbox_json_results, result_files['bbox'])\n\n        if segm_json_results is not None:\n            result_files['segm'] = f'{outfile_prefix}.segm.json'\n            dump(segm_json_results, result_files['segm'])\n\n        return result_files\n\n    def gt_to_coco_json(self, gt_dicts: Sequence[dict],\n                        outfile_prefix: str) -> str:\n        \"\"\"Convert ground truth to coco format json file.\n\n        Args:\n            gt_dicts (Sequence[dict]): Ground truth of the dataset.\n            outfile_prefix (str): The filename prefix of the json files. 
If the\n                prefix is \"somepath/xxx\", the json file will be named\n                \"somepath/xxx.gt.json\".\n        Returns:\n            str: The filename of the json file.\n        \"\"\"\n        categories = [\n            dict(id=id, name=name)\n            for id, name in enumerate(self.dataset_meta['classes'])\n        ]\n        image_infos = []\n        annotations = []\n\n        for idx, gt_dict in enumerate(gt_dicts):\n            img_id = gt_dict.get('img_id', idx)\n            image_info = dict(\n                id=img_id,\n                width=gt_dict['width'],\n                height=gt_dict['height'],\n                file_name='')\n            image_infos.append(image_info)\n            for ann in gt_dict['anns']:\n                label = ann['bbox_label']\n                bbox = ann['bbox']\n                coco_bbox = [\n                    bbox[0],\n                    bbox[1],\n                    bbox[2] - bbox[0],\n                    bbox[3] - bbox[1],\n                ]\n\n                annotation = dict(\n                    id=len(annotations) +\n                    1,  # coco api requires id starts with 1\n                    image_id=img_id,\n                    bbox=coco_bbox,\n                    iscrowd=ann.get('ignore_flag', 0),\n                    category_id=int(label),\n                    area=coco_bbox[2] * coco_bbox[3])\n                if ann.get('mask', None):\n                    mask = ann['mask']\n                    # area = mask_util.area(mask)\n                    if isinstance(mask, dict) and isinstance(\n                            mask['counts'], bytes):\n                        mask['counts'] = mask['counts'].decode()\n                    annotation['segmentation'] = mask\n                    # annotation['area'] = float(area)\n                annotations.append(annotation)\n\n        info = dict(\n            date_created=str(datetime.datetime.now()),\n            description='Coco json file converted by mmdet CocoMetric.')\n        coco_json = dict(\n            info=info,\n            images=image_infos,\n            categories=categories,\n            licenses=None,\n        )\n        if len(annotations) > 0:\n            coco_json['annotations'] = annotations\n        converted_json_path = f'{outfile_prefix}.gt.json'\n        dump(coco_json, converted_json_path)\n        return converted_json_path\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. 
The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            result = dict()\n            pred = data_sample['pred_instances']\n            result['img_id'] = data_sample['img_id']\n            result['bboxes'] = pred['bboxes'].cpu().numpy()\n            result['scores'] = pred['scores'].cpu().numpy()\n            result['labels'] = pred['labels'].cpu().numpy()\n            # encode mask to RLE\n            if 'masks' in pred:\n                result['masks'] = encode_mask_results(\n                    pred['masks'].detach().cpu().numpy()) if isinstance(\n                        pred['masks'], torch.Tensor) else pred['masks']\n            # some detectors use different scores for bbox and mask\n            if 'mask_scores' in pred:\n                result['mask_scores'] = pred['mask_scores'].cpu().numpy()\n\n            # parse gt\n            gt = dict()\n            gt['width'] = data_sample['ori_shape'][1]\n            gt['height'] = data_sample['ori_shape'][0]\n            gt['img_id'] = data_sample['img_id']\n            if self._coco_api is None:\n                # TODO: Need to refactor to support LoadAnnotations\n                assert 'instances' in data_sample, \\\n                    'ground truth is required for evaluation when ' \\\n                    '`ann_file` is not provided'\n                gt['anns'] = data_sample['instances']\n            # add converted result to the results list\n            self.results.append((gt, result))\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            Dict[str, float]: The computed metrics. 
The keys are the names of\n            the metrics, and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        # split gt and prediction list\n        gts, preds = zip(*results)\n\n        tmp_dir = None\n        if self.outfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            outfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            outfile_prefix = self.outfile_prefix\n\n        if self._coco_api is None:\n            # use converted gt json file to initialize coco api\n            logger.info('Converting ground truth to coco format...')\n            coco_json_path = self.gt_to_coco_json(\n                gt_dicts=gts, outfile_prefix=outfile_prefix)\n            self._coco_api = COCO(coco_json_path)\n\n        # handle lazy init\n        if self.cat_ids is None:\n            self.cat_ids = self._coco_api.get_cat_ids(\n                cat_names=self.dataset_meta['classes'])\n        if self.img_ids is None:\n            self.img_ids = self._coco_api.get_img_ids()\n\n        # convert predictions to coco format and dump to json file\n        result_files = self.results2json(preds, outfile_prefix)\n\n        eval_results = OrderedDict()\n        if self.format_only:\n            logger.info('results are saved in '\n                        f'{osp.dirname(outfile_prefix)}')\n            return eval_results\n\n        for metric in self.metrics:\n            logger.info(f'Evaluating {metric}...')\n\n            # TODO: May refactor fast_eval_recall to an independent metric?\n            # fast eval recall\n            if metric == 'proposal_fast':\n                ar = self.fast_eval_recall(\n                    preds, self.proposal_nums, self.iou_thrs, logger=logger)\n                log_msg = []\n                for i, num in enumerate(self.proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n                    log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n                log_msg = ''.join(log_msg)\n                logger.info(log_msg)\n                continue\n\n            # evaluate proposal, bbox and segm\n            iou_type = 'bbox' if metric == 'proposal' else metric\n            if metric not in result_files:\n                raise KeyError(f'{metric} is not in results')\n            try:\n                predictions = load(result_files[metric])\n                if iou_type == 'segm':\n                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331  # noqa\n                    # When evaluating mask AP, if the results contain bbox,\n                    # cocoapi will use the box area instead of the mask area\n                    # for calculating the instance area. 
Though the overall AP\n                    # is not affected, this leads to different\n                    # small/medium/large mask AP results.\n                    for x in predictions:\n                        x.pop('bbox')\n                coco_dt = self._coco_api.loadRes(predictions)\n\n            except IndexError:\n                logger.error(\n                    'The testing results of the whole dataset is empty.')\n                break\n\n            coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)\n\n            coco_eval.params.catIds = self.cat_ids\n            coco_eval.params.imgIds = self.img_ids\n            coco_eval.params.maxDets = list(self.proposal_nums)\n            coco_eval.params.iouThrs = self.iou_thrs\n\n            # mapping of cocoEval.stats\n            coco_metric_names = {\n                'mAP': 0,\n                'mAP_50': 1,\n                'mAP_75': 2,\n                'mAP_s': 3,\n                'mAP_m': 4,\n                'mAP_l': 5,\n                'AR@100': 6,\n                'AR@300': 7,\n                'AR@1000': 8,\n                'AR_s@1000': 9,\n                'AR_m@1000': 10,\n                'AR_l@1000': 11\n            }\n            metric_items = self.metric_items\n            if metric_items is not None:\n                for metric_item in metric_items:\n                    if metric_item not in coco_metric_names:\n                        raise KeyError(\n                            f'metric item \"{metric_item}\" is not supported')\n\n            if metric == 'proposal':\n                coco_eval.params.useCats = 0\n                coco_eval.evaluate()\n                coco_eval.accumulate()\n                coco_eval.summarize()\n                if metric_items is None:\n                    metric_items = [\n                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n                        'AR_m@1000', 'AR_l@1000'\n                    ]\n\n                for item in metric_items:\n                    val = float(\n                        f'{coco_eval.stats[coco_metric_names[item]]:.3f}')\n                    eval_results[item] = val\n            else:\n                coco_eval.evaluate()\n                coco_eval.accumulate()\n                coco_eval.summarize()\n                if self.classwise:  # Compute per-category AP\n                    # Compute per-category AP\n                    # from https://github.com/facebookresearch/detectron2/\n                    precisions = coco_eval.eval['precision']\n                    # precision: (iou, recall, cls, area range, max dets)\n                    assert len(self.cat_ids) == precisions.shape[2]\n\n                    results_per_category = []\n                    for idx, cat_id in enumerate(self.cat_ids):\n                        # area range index 0: all area ranges\n                        # max dets index -1: typically 100 per image\n                        nm = self._coco_api.loadCats(cat_id)[0]\n                        precision = precisions[:, :, idx, 0, -1]\n                        precision = precision[precision > -1]\n                        if precision.size:\n                            ap = np.mean(precision)\n                        else:\n                            ap = float('nan')\n                        results_per_category.append(\n                            (f'{nm[\"name\"]}', f'{round(ap, 3)}'))\n                        eval_results[f'{nm[\"name\"]}_precision'] = round(ap, 3)\n\n                    num_columns = min(6, 
len(results_per_category) * 2)\n                    results_flatten = list(\n                        itertools.chain(*results_per_category))\n                    headers = ['category', 'AP'] * (num_columns // 2)\n                    results_2d = itertools.zip_longest(*[\n                        results_flatten[i::num_columns]\n                        for i in range(num_columns)\n                    ])\n                    table_data = [headers]\n                    table_data += [result for result in results_2d]\n                    table = AsciiTable(table_data)\n                    logger.info('\\n' + table.table)\n\n                if metric_items is None:\n                    metric_items = [\n                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n                    ]\n\n                for metric_item in metric_items:\n                    key = f'{metric}_{metric_item}'\n                    val = coco_eval.stats[coco_metric_names[metric_item]]\n                    eval_results[key] = float(f'{round(val, 3)}')\n\n                ap = coco_eval.stats[:6]\n                logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '\n                            f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '\n                            f'{ap[4]:.3f} {ap[5]:.3f}')\n\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n"
  },
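A minimal end-to-end sketch of the metric above, assuming mmdet and pycocotools are installed; the single synthetic sample exercises the ``ann_file=None`` path, where ground truth is taken from each sample's ``instances`` list and converted to a temporary COCO json before evaluation.

# Synthetic single-image example; not part of the repository.
import torch
from mmdet.evaluation.metrics import CocoMetric

coco_metric = CocoMetric(metric='bbox')           # no ann_file: GT comes from samples
coco_metric.dataset_meta = dict(classes=('cat', ))

data_sample = dict(
    img_id=0,
    ori_shape=(100, 100),  # (height, width)
    instances=[dict(bbox=[10, 10, 50, 50], bbox_label=0, ignore_flag=0)],
    pred_instances=dict(
        bboxes=torch.tensor([[10., 10., 50., 50.]]),
        scores=torch.tensor([0.9]),
        labels=torch.tensor([0])))

coco_metric.process({}, [data_sample])
print(coco_metric.evaluate(size=1))  # e.g. {'coco/bbox_mAP': 1.0, ...}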
  {
    "path": "mmdet/evaluation/metrics/coco_occluded_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nimport os.path as osp\nfrom typing import Dict, List, Optional, Union\n\nimport mmengine\nimport numpy as np\nfrom mmengine.fileio import load\nfrom mmengine.logging import print_log\nfrom pycocotools import mask as coco_mask\nfrom terminaltables import AsciiTable\n\nfrom mmdet.registry import METRICS\nfrom .coco_metric import CocoMetric\n\n\n@METRICS.register_module()\nclass CocoOccludedSeparatedMetric(CocoMetric):\n    \"\"\"Metric of separated and occluded masks which presented in paper `A Tri-\n    Layer Plugin to Improve Occluded Detection.\n\n    <https://arxiv.org/abs/2210.10046>`_.\n\n    Separated COCO and Occluded COCO are automatically generated subsets of\n    COCO val dataset, collecting separated objects and partially occluded\n    objects for a large variety of categories. In this way, we define\n    occlusion into two major categories: separated and partially occluded.\n\n    - Separation: target object segmentation mask is separated into distinct\n      regions by the occluder.\n    - Partial Occlusion: target object is partially occluded but the\n      segmentation mask is connected.\n\n    These two new scalable real-image datasets are to benchmark a model's\n    capability to detect occluded objects of 80 common categories.\n\n    Please cite the paper if you use this dataset:\n\n    @article{zhan2022triocc,\n        title={A Tri-Layer Plugin to Improve Occluded Detection},\n        author={Zhan, Guanqi and Xie, Weidi and Zisserman, Andrew},\n        journal={British Machine Vision Conference},\n        year={2022}\n    }\n\n    Args:\n        occluded_ann (str): Path to the occluded coco annotation file.\n        separated_ann (str): Path to the separated coco annotation file.\n        score_thr (float): Score threshold of the detection masks.\n            Defaults to 0.3.\n        iou_thr (float): IoU threshold for the recall calculation.\n            Defaults to 0.75.\n        metric (str | List[str]): Metrics to be evaluated. Valid metrics\n            include 'bbox', 'segm', 'proposal', and 'proposal_fast'.\n            Defaults to 'bbox'.\n    \"\"\"\n    default_prefix: Optional[str] = 'coco'\n\n    def __init__(\n            self,\n            *args,\n            occluded_ann:\n        str = 'https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/occluded_coco.pkl',  # noqa\n            separated_ann:\n        str = 'https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/separated_coco.pkl',  # noqa\n            score_thr: float = 0.3,\n            iou_thr: float = 0.75,\n            metric: Union[str, List[str]] = ['bbox', 'segm'],\n            **kwargs) -> None:\n        super().__init__(*args, metric=metric, **kwargs)\n        # load from local file\n        if osp.isfile(occluded_ann) and not osp.isabs(occluded_ann):\n            occluded_ann = osp.join(self.data_root, occluded_ann)\n        if osp.isfile(separated_ann) and not osp.isabs(separated_ann):\n            separated_ann = osp.join(self.data_root, separated_ann)\n        self.occluded_ann = load(occluded_ann)\n        self.separated_ann = load(separated_ann)\n        self.score_thr = score_thr\n        self.iou_thr = iou_thr\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            Dict[str, float]: The computed metrics. 
The keys are the names of\n            the metrics, and the values are corresponding results.\n        \"\"\"\n        coco_metric_res = super().compute_metrics(results)\n        eval_res = self.evaluate_occluded_separated(results)\n        coco_metric_res.update(eval_res)\n        return coco_metric_res\n\n    def evaluate_occluded_separated(self, results: List[tuple]) -> dict:\n        \"\"\"Compute the recall of occluded and separated masks.\n\n        Args:\n            results (list[tuple]): Testing results of the dataset.\n\n        Returns:\n            dict[str, float]: The recall of occluded and separated masks.\n        \"\"\"\n        dict_det = {}\n        print_log('processing detection results...')\n        prog_bar = mmengine.ProgressBar(len(results))\n        for i in range(len(results)):\n            gt, dt = results[i]\n            img_id = dt['img_id']\n            cur_img_name = self._coco_api.imgs[img_id]['file_name']\n            if cur_img_name not in dict_det.keys():\n                dict_det[cur_img_name] = []\n\n            for bbox, score, label, mask in zip(dt['bboxes'], dt['scores'],\n                                                dt['labels'], dt['masks']):\n                cur_binary_mask = coco_mask.decode(mask)\n                dict_det[cur_img_name].append([\n                    score, self.dataset_meta['classes'][label],\n                    cur_binary_mask, bbox\n                ])\n            dict_det[cur_img_name].sort(\n                key=lambda x: (-x[0], x[3][0], x[3][1])\n            )  # rank by confidence from high to low, avoid same confidence\n            prog_bar.update()\n        print_log('\\ncomputing occluded mask recall...', logger='current')\n        occluded_correct_num, occluded_recall = self.compute_recall(\n            dict_det, gt_ann=self.occluded_ann, is_occ=True)\n        print_log(\n            f'\\nCOCO occluded mask recall: {occluded_recall:.2f}%',\n            logger='current')\n        print_log(\n            f'COCO occluded mask success num: {occluded_correct_num}',\n            logger='current')\n        print_log('computing separated mask recall...', logger='current')\n        separated_correct_num, separated_recall = self.compute_recall(\n            dict_det, gt_ann=self.separated_ann, is_occ=False)\n        print_log(\n            f'\\nCOCO separated mask recall: {separated_recall:.2f}%',\n            logger='current')\n        print_log(\n            f'COCO separated mask success num: {separated_correct_num}',\n            logger='current')\n        table_data = [\n            ['mask type', 'recall', 'num correct'],\n            ['occluded', f'{occluded_recall:.2f}%', occluded_correct_num],\n            ['separated', f'{separated_recall:.2f}%', separated_correct_num]\n        ]\n        table = AsciiTable(table_data)\n        print_log('\\n' + table.table, logger='current')\n        return dict(\n            occluded_recall=occluded_recall, separated_recall=separated_recall)\n\n    def compute_recall(self,\n                       result_dict: dict,\n                       gt_ann: list,\n                       is_occ: bool = True) -> tuple:\n        \"\"\"Compute the recall of occluded or separated masks.\n\n        Args:\n            result_dict (dict): Processed mask results.\n            gt_ann (list): Occluded or separated coco annotations.\n            is_occ (bool): Whether the annotation is occluded mask.\n                Defaults to True.\n        Returns:\n            tuple: number of correct masks and the 
recall.\n        \"\"\"\n        correct = 0\n        prog_bar = mmengine.ProgressBar(len(gt_ann))\n        for iter_i in range(len(gt_ann)):\n            cur_item = gt_ann[iter_i]\n            cur_img_name = cur_item[0]\n            cur_gt_bbox = cur_item[3]\n            if is_occ:\n                cur_gt_bbox = [\n                    cur_gt_bbox[0], cur_gt_bbox[1],\n                    cur_gt_bbox[0] + cur_gt_bbox[2],\n                    cur_gt_bbox[1] + cur_gt_bbox[3]\n                ]\n            cur_gt_class = cur_item[1]\n            cur_gt_mask = coco_mask.decode(cur_item[4])\n\n            assert cur_img_name in result_dict.keys()\n            cur_detections = result_dict[cur_img_name]\n\n            correct_flag = False\n            for i in range(len(cur_detections)):\n                cur_det_confidence = cur_detections[i][0]\n                if cur_det_confidence < self.score_thr:\n                    break\n                cur_det_class = cur_detections[i][1]\n                if cur_det_class != cur_gt_class:\n                    continue\n                cur_det_mask = cur_detections[i][2]\n                cur_iou = self.mask_iou(cur_det_mask, cur_gt_mask)\n                if cur_iou >= self.iou_thr:\n                    correct_flag = True\n                    break\n            if correct_flag:\n                correct += 1\n            prog_bar.update()\n        recall = correct / len(gt_ann) * 100\n        return correct, recall\n\n    def mask_iou(self, mask1: np.ndarray, mask2: np.ndarray) -> np.ndarray:\n        \"\"\"Compute IoU between two masks.\"\"\"\n        mask1_area = np.count_nonzero(mask1 == 1)\n        mask2_area = np.count_nonzero(mask2 == 1)\n        intersection = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1))\n        iou = intersection / (mask1_area + mask2_area - intersection)\n        return iou\n"
  },
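A worked numeric check of the ``mask_iou`` logic used above, with tiny synthetic binary masks:

# Synthetic masks illustrating the IoU computation in mask_iou.
import numpy as np

mask1 = np.zeros((4, 4), dtype=np.uint8)
mask1[:2, :2] = 1                      # 4 foreground pixels
mask2 = np.zeros((4, 4), dtype=np.uint8)
mask2[:2, :] = 1                       # 8 foreground pixels, 4 of them overlapping
inter = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1))
union = np.count_nonzero(mask1 == 1) + np.count_nonzero(mask2 == 1) - inter
print(inter / union)                   # 4 / 8 = 0.5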
  {
    "path": "mmdet/evaluation/metrics/coco_panoptic_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport datetime\nimport itertools\nimport os.path as osp\nimport tempfile\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport mmcv\nimport numpy as np\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.fileio import FileClient, dump, load\nfrom mmengine.logging import MMLogger, print_log\nfrom terminaltables import AsciiTable\n\nfrom mmdet.datasets.api_wrappers import COCOPanoptic\nfrom mmdet.registry import METRICS\nfrom ..functional import (INSTANCE_OFFSET, pq_compute_multi_core,\n                          pq_compute_single_core)\n\ntry:\n    import panopticapi\n    from panopticapi.evaluation import VOID, PQStat\n    from panopticapi.utils import id2rgb, rgb2id\nexcept ImportError:\n    panopticapi = None\n    id2rgb = None\n    rgb2id = None\n    VOID = None\n    PQStat = None\n\n\n@METRICS.register_module()\nclass CocoPanopticMetric(BaseMetric):\n    \"\"\"COCO panoptic segmentation evaluation metric.\n\n    Evaluate PQ, SQ RQ for panoptic segmentation tasks. Please refer to\n    https://cocodataset.org/#panoptic-eval for more details.\n\n    Args:\n        ann_file (str, optional): Path to the coco format annotation file.\n            If not specified, ground truth annotations from the dataset will\n            be converted to coco format. Defaults to None.\n        seg_prefix (str, optional): Path to the directory which contains the\n            coco panoptic segmentation mask. It should be specified when\n            evaluate. Defaults to None.\n        classwise (bool): Whether to evaluate the metric class-wise.\n            Defaults to False.\n        outfile_prefix (str, optional): The prefix of json files. It includes\n            the file path and the prefix of filename, e.g., \"a/b/prefix\".\n            If not specified, a temp file will be created.\n            It should be specified when format_only is True. Defaults to None.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        nproc (int): Number of processes for panoptic quality computing.\n            Defaults to 32. When ``nproc`` exceeds the number of cpu cores,\n            the number of cpu cores is used.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. 
Defaults to None.\n    \"\"\"\n    default_prefix: Optional[str] = 'coco_panoptic'\n\n    def __init__(self,\n                 ann_file: Optional[str] = None,\n                 seg_prefix: Optional[str] = None,\n                 classwise: bool = False,\n                 format_only: bool = False,\n                 outfile_prefix: Optional[str] = None,\n                 nproc: int = 32,\n                 file_client_args: dict = dict(backend='disk'),\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        if panopticapi is None:\n            raise RuntimeError(\n                'panopticapi is not installed, please install it by: '\n                'pip install git+https://github.com/cocodataset/'\n                'panopticapi.git.')\n\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        self.classwise = classwise\n        self.format_only = format_only\n        if self.format_only:\n            assert outfile_prefix is not None, 'outfile_prefix must be not'\n            'None when format_only is True, otherwise the result files will'\n            'be saved to a temp directory which will be cleaned up at the end.'\n\n        self.tmp_dir = None\n        # outfile_prefix should be a prefix of a path which points to a shared\n        # storage when train or test with multi nodes.\n        self.outfile_prefix = outfile_prefix\n        if outfile_prefix is None:\n            self.tmp_dir = tempfile.TemporaryDirectory()\n            self.outfile_prefix = osp.join(self.tmp_dir.name, 'results')\n        # the directory to save predicted panoptic segmentation mask\n        self.seg_out_dir = f'{self.outfile_prefix}.panoptic'\n        self.nproc = nproc\n        self.seg_prefix = seg_prefix\n\n        self.cat_ids = None\n        self.cat2label = None\n\n        self.file_client_args = file_client_args\n        self.file_client = FileClient(**file_client_args)\n\n        if ann_file:\n            with self.file_client.get_local_path(ann_file) as local_path:\n                self._coco_api = COCOPanoptic(local_path)\n            self.categories = self._coco_api.cats\n        else:\n            self._coco_api = None\n            self.categories = None\n\n        self.file_client = FileClient(**file_client_args)\n\n    def __del__(self) -> None:\n        \"\"\"Clean up.\"\"\"\n        if self.tmp_dir is not None:\n            self.tmp_dir.cleanup()\n\n    def gt_to_coco_json(self, gt_dicts: Sequence[dict],\n                        outfile_prefix: str) -> Tuple[str, str]:\n        \"\"\"Convert ground truth to coco panoptic segmentation format json file.\n\n        Args:\n            gt_dicts (Sequence[dict]): Ground truth of the dataset.\n            outfile_prefix (str): The filename prefix of the json file. 
If the\n                prefix is \"somepath/xxx\", the json file will be named\n                \"somepath/xxx.gt.json\".\n\n        Returns:\n            Tuple[str, str]: The filename of the json file and the name of the\\\n                directory which contains panoptic segmentation masks.\n        \"\"\"\n        assert len(gt_dicts) > 0, 'gt_dicts is empty.'\n        gt_folder = osp.dirname(gt_dicts[0]['seg_map_path'])\n        converted_json_path = f'{outfile_prefix}.gt.json'\n\n        categories = []\n        for id, name in enumerate(self.dataset_meta['classes']):\n            isthing = 1 if name in self.dataset_meta['thing_classes'] else 0\n            categories.append({'id': id, 'name': name, 'isthing': isthing})\n\n        image_infos = []\n        annotations = []\n        for gt_dict in gt_dicts:\n            img_id = gt_dict['image_id']\n            image_info = {\n                'id': img_id,\n                'width': gt_dict['width'],\n                'height': gt_dict['height'],\n                'file_name': osp.split(gt_dict['seg_map_path'])[-1]\n            }\n            image_infos.append(image_info)\n\n            pan_png = mmcv.imread(gt_dict['seg_map_path']).squeeze()\n            pan_png = pan_png[:, :, ::-1]\n            pan_png = rgb2id(pan_png)\n            segments_info = []\n            for segment_info in gt_dict['segments_info']:\n                id = segment_info['id']\n                label = segment_info['category']\n                mask = pan_png == id\n                isthing = categories[label]['isthing']\n                if isthing:\n                    iscrowd = 1 if not segment_info['is_thing'] else 0\n                else:\n                    iscrowd = 0\n\n                new_segment_info = {\n                    'id': id,\n                    'category_id': label,\n                    'isthing': isthing,\n                    'iscrowd': iscrowd,\n                    'area': mask.sum()\n                }\n                segments_info.append(new_segment_info)\n\n            segm_file = image_info['file_name'].replace('jpg', 'png')\n            annotation = dict(\n                image_id=img_id,\n                segments_info=segments_info,\n                file_name=segm_file)\n            annotations.append(annotation)\n            pan_png = id2rgb(pan_png)\n\n        info = dict(\n            date_created=str(datetime.datetime.now()),\n            description='Coco json file converted by mmdet CocoPanopticMetric.'\n        )\n        coco_json = dict(\n            info=info,\n            images=image_infos,\n            categories=categories,\n            licenses=None,\n        )\n        if len(annotations) > 0:\n            coco_json['annotations'] = annotations\n        dump(coco_json, converted_json_path)\n        return converted_json_path, gt_folder\n\n    def result2json(self, results: Sequence[dict],\n                    outfile_prefix: str) -> Tuple[str, str]:\n        \"\"\"Dump the panoptic results to a COCO style json file and a directory.\n\n        Args:\n            results (Sequence[dict]): Testing results of the dataset.\n            outfile_prefix (str): The filename prefix of the json files and the\n                directory.\n\n        Returns:\n            Tuple[str, str]: The json file and the directory which contains \\\n                panoptic segmentation masks. 
The filename of the json is\n                \"somepath/xxx.panoptic.json\" and name of the directory is\n                \"somepath/xxx.panoptic\".\n        \"\"\"\n        label2cat = dict((v, k) for (k, v) in self.cat2label.items())\n        pred_annotations = []\n        for idx in range(len(results)):\n            result = results[idx]\n            for segment_info in result['segments_info']:\n                sem_label = segment_info['category_id']\n                # convert sem_label to json label\n                cat_id = label2cat[sem_label]\n                segment_info['category_id'] = label2cat[sem_label]\n                is_thing = self.categories[cat_id]['isthing']\n                segment_info['isthing'] = is_thing\n            pred_annotations.append(result)\n        pan_json_results = dict(annotations=pred_annotations)\n        json_filename = f'{outfile_prefix}.panoptic.json'\n        dump(pan_json_results, json_filename)\n        return json_filename, (\n            self.seg_out_dir\n            if self.tmp_dir is None else tempfile.gettempdir())\n\n    def _parse_predictions(self,\n                           pred: dict,\n                           img_id: int,\n                           segm_file: str,\n                           label2cat=None) -> dict:\n        \"\"\"Parse panoptic segmentation predictions.\n\n        Args:\n            pred (dict): Panoptic segmentation predictions.\n            img_id (int): Image id.\n            segm_file (str): Segmentation file name.\n            label2cat (dict): Mapping from label to category id.\n                Defaults to None.\n\n        Returns:\n            dict: Parsed predictions.\n        \"\"\"\n        result = dict()\n        result['img_id'] = img_id\n        # shape (1, H, W) -> (H, W)\n        pan = pred['pred_panoptic_seg']['sem_seg'].cpu().numpy()[0]\n        pan_labels = np.unique(pan)\n        segments_info = []\n        for pan_label in pan_labels:\n            sem_label = pan_label % INSTANCE_OFFSET\n            # We reserve the length of dataset_meta['classes'] for VOID label\n            if sem_label == len(self.dataset_meta['classes']):\n                continue\n            mask = pan == pan_label\n            area = mask.sum()\n            segments_info.append({\n                'id':\n                int(pan_label),\n                # when ann_file provided, sem_label should be cat_id, otherwise\n                # sem_label should be a continuous id, not the cat_id\n                # defined in dataset\n                'category_id':\n                label2cat[sem_label] if label2cat else sem_label,\n                'area':\n                int(area)\n            })\n        # evaluation script uses 0 for VOID label.\n        pan[pan % INSTANCE_OFFSET == len(self.dataset_meta['classes'])] = VOID\n        pan = id2rgb(pan).astype(np.uint8)\n        mmcv.imwrite(pan[:, :, ::-1], osp.join(self.seg_out_dir, segm_file))\n        result = {\n            'image_id': img_id,\n            'segments_info': segments_info,\n            'file_name': segm_file\n        }\n\n        return result\n\n    def _compute_batch_pq_stats(self, data_samples: Sequence[dict]):\n        \"\"\"Process gts and predictions when ``outfile_prefix`` is not set, gts\n        are from dataset or a json file which is defined by ``ann_file``.\n\n        Intermediate results, ``pq_stats``, are computed here and put into\n        ``self.results``.\n        \"\"\"\n        if self._coco_api is None:\n            categories = dict()\n      
      for id, name in enumerate(self.dataset_meta['classes']):\n                isthing = 1 if name in self.dataset_meta['thing_classes']\\\n                    else 0\n                categories[id] = {'id': id, 'name': name, 'isthing': isthing}\n            label2cat = None\n        else:\n            categories = self.categories\n            cat_ids = self._coco_api.get_cat_ids(\n                cat_names=self.dataset_meta['classes'])\n            label2cat = {i: cat_id for i, cat_id in enumerate(cat_ids)}\n\n        for data_sample in data_samples:\n            # parse pred\n            img_id = data_sample['img_id']\n            segm_file = osp.basename(data_sample['img_path']).replace(\n                'jpg', 'png')\n            result = self._parse_predictions(\n                pred=data_sample,\n                img_id=img_id,\n                segm_file=segm_file,\n                label2cat=label2cat)\n\n            # parse gt\n            gt = dict()\n            gt['image_id'] = img_id\n            gt['width'] = data_sample['ori_shape'][1]\n            gt['height'] = data_sample['ori_shape'][0]\n            gt['file_name'] = segm_file\n\n            if self._coco_api is None:\n                # get segments_info from data_sample\n                seg_map_path = osp.join(self.seg_prefix, segm_file)\n                pan_png = mmcv.imread(seg_map_path).squeeze()\n                pan_png = pan_png[:, :, ::-1]\n                pan_png = rgb2id(pan_png)\n                segments_info = []\n\n                for segment_info in data_sample['segments_info']:\n                    id = segment_info['id']\n                    label = segment_info['category']\n                    mask = pan_png == id\n                    isthing = categories[label]['isthing']\n                    if isthing:\n                        iscrowd = 1 if not segment_info['is_thing'] else 0\n                    else:\n                        iscrowd = 0\n\n                    new_segment_info = {\n                        'id': id,\n                        'category_id': label,\n                        'isthing': isthing,\n                        'iscrowd': iscrowd,\n                        'area': mask.sum()\n                    }\n                    segments_info.append(new_segment_info)\n            else:\n                # get segments_info from annotation file\n                segments_info = self._coco_api.imgToAnns[img_id]\n\n            gt['segments_info'] = segments_info\n\n            pq_stats = pq_compute_single_core(\n                proc_id=0,\n                annotation_set=[(gt, result)],\n                gt_folder=self.seg_prefix,\n                pred_folder=self.seg_out_dir,\n                categories=categories,\n                file_client=self.file_client)\n\n            self.results.append(pq_stats)\n\n    def _process_gt_and_predictions(self, data_samples: Sequence[dict]):\n        \"\"\"Process gts and predictions when ``outfile_prefix`` is set.\n\n        The predictions will be saved to directory specified by\n        ``outfile_predfix``. 
The matched pair (gt, result) will be put into\n        ``self.results``.\n        \"\"\"\n        for data_sample in data_samples:\n            # parse pred\n            img_id = data_sample['img_id']\n            segm_file = osp.basename(data_sample['img_path']).replace(\n                'jpg', 'png')\n            result = self._parse_predictions(\n                pred=data_sample, img_id=img_id, segm_file=segm_file)\n\n            # parse gt\n            gt = dict()\n            gt['image_id'] = img_id\n            gt['width'] = data_sample['ori_shape'][1]\n            gt['height'] = data_sample['ori_shape'][0]\n\n            if self._coco_api is None:\n                # get segments_info from dataset\n                gt['segments_info'] = data_sample['segments_info']\n                gt['seg_map_path'] = data_sample['seg_map_path']\n\n            self.results.append((gt, result))\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        # If ``self.tmp_dir`` is none, it will save gt and predictions to\n        # self.results, otherwise, it will compute pq_stats here.\n        if self.tmp_dir is None:\n            self._process_gt_and_predictions(data_samples)\n        else:\n            self._compute_batch_pq_stats(data_samples)\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch. There\n                are two cases:\n\n                - When ``outfile_prefix`` is not provided, the elements in\n                  results are pq_stats which can be summed directly to get PQ.\n                - When ``outfile_prefix`` is provided, the elements in\n                  results are tuples like (gt, pred).\n\n        Returns:\n            Dict[str, float]: The computed metrics. 
The keys are the names of\n                the metrics, and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        if self.tmp_dir is None:\n            # do evaluation after collect all the results\n\n            # split gt and prediction list\n            gts, preds = zip(*results)\n\n            if self._coco_api is None:\n                # use converted gt json file to initialize coco api\n                logger.info('Converting ground truth to coco format...')\n                coco_json_path, gt_folder = self.gt_to_coco_json(\n                    gt_dicts=gts, outfile_prefix=self.outfile_prefix)\n                self._coco_api = COCOPanoptic(coco_json_path)\n            else:\n                gt_folder = self.seg_prefix\n\n            self.cat_ids = self._coco_api.get_cat_ids(\n                cat_names=self.dataset_meta['classes'])\n            self.cat2label = {\n                cat_id: i\n                for i, cat_id in enumerate(self.cat_ids)\n            }\n            self.img_ids = self._coco_api.get_img_ids()\n            self.categories = self._coco_api.cats\n\n            # convert predictions to coco format and dump to json file\n            json_filename, pred_folder = self.result2json(\n                results=preds, outfile_prefix=self.outfile_prefix)\n\n            if self.format_only:\n                logger.info('results are saved in '\n                            f'{osp.dirname(self.outfile_prefix)}')\n                return dict()\n\n            imgs = self._coco_api.imgs\n            gt_json = self._coco_api.img_ann_map\n            gt_json = [{\n                'image_id': k,\n                'segments_info': v,\n                'file_name': imgs[k]['segm_file']\n            } for k, v in gt_json.items()]\n            pred_json = load(json_filename)\n            pred_json = dict(\n                (el['image_id'], el) for el in pred_json['annotations'])\n\n            # match the gt_anns and pred_anns in the same image\n            matched_annotations_list = []\n            for gt_ann in gt_json:\n                img_id = gt_ann['image_id']\n                if img_id not in pred_json.keys():\n                    raise Exception('no prediction for the image'\n                                    ' with id: {}'.format(img_id))\n                matched_annotations_list.append((gt_ann, pred_json[img_id]))\n\n            pq_stat = pq_compute_multi_core(\n                matched_annotations_list,\n                gt_folder,\n                pred_folder,\n                self.categories,\n                file_client=self.file_client,\n                nproc=self.nproc)\n\n        else:\n            # aggregate the results generated in process\n            if self._coco_api is None:\n                categories = dict()\n                for id, name in enumerate(self.dataset_meta['classes']):\n                    isthing = 1 if name in self.dataset_meta[\n                        'thing_classes'] else 0\n                    categories[id] = {\n                        'id': id,\n                        'name': name,\n                        'isthing': isthing\n                    }\n                self.categories = categories\n\n            pq_stat = PQStat()\n            for result in results:\n                pq_stat += result\n\n        metrics = [('All', None), ('Things', True), ('Stuff', False)]\n        pq_results = {}\n\n        for name, isthing in metrics:\n            pq_results[name], 
classwise_results = pq_stat.pq_average(\n                self.categories, isthing=isthing)\n            if name == 'All':\n                pq_results['classwise'] = classwise_results\n\n        classwise_results = None\n        if self.classwise:\n            classwise_results = {\n                k: v\n                for k, v in zip(self.dataset_meta['classes'],\n                                pq_results['classwise'].values())\n            }\n\n        print_panoptic_table(pq_results, classwise_results, logger=logger)\n        results = parse_pq_results(pq_results)\n\n        return results\n\n\ndef parse_pq_results(pq_results: dict) -> dict:\n    \"\"\"Parse the Panoptic Quality results.\n\n    Args:\n        pq_results (dict): Panoptic Quality results.\n\n    Returns:\n        dict: Panoptic Quality results parsed.\n    \"\"\"\n    result = dict()\n    result['PQ'] = 100 * pq_results['All']['pq']\n    result['SQ'] = 100 * pq_results['All']['sq']\n    result['RQ'] = 100 * pq_results['All']['rq']\n    result['PQ_th'] = 100 * pq_results['Things']['pq']\n    result['SQ_th'] = 100 * pq_results['Things']['sq']\n    result['RQ_th'] = 100 * pq_results['Things']['rq']\n    result['PQ_st'] = 100 * pq_results['Stuff']['pq']\n    result['SQ_st'] = 100 * pq_results['Stuff']['sq']\n    result['RQ_st'] = 100 * pq_results['Stuff']['rq']\n    return result\n\n\ndef print_panoptic_table(\n        pq_results: dict,\n        classwise_results: Optional[dict] = None,\n        logger: Optional[Union['MMLogger', str]] = None) -> None:\n    \"\"\"Print the panoptic evaluation results table.\n\n    Args:\n        pq_results(dict): The Panoptic Quality results.\n        classwise_results(dict, optional): The classwise Panoptic Quality.\n            results. The keys are class names and the values are metrics.\n            Defaults to None.\n        logger (:obj:`MMLogger` | str, optional): Logger used for printing\n            related information during evaluation. Default: None.\n    \"\"\"\n\n    headers = ['', 'PQ', 'SQ', 'RQ', 'categories']\n    data = [headers]\n    for name in ['All', 'Things', 'Stuff']:\n        numbers = [\n            f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']\n        ]\n        row = [name] + numbers + [pq_results[name]['n']]\n        data.append(row)\n    table = AsciiTable(data)\n    print_log('Panoptic Evaluation Results:\\n' + table.table, logger=logger)\n\n    if classwise_results is not None:\n        class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}'\n                                          for k in ['pq', 'sq', 'rq'])\n                         for name, metrics in classwise_results.items()]\n        num_columns = min(8, len(class_metrics) * 4)\n        results_flatten = list(itertools.chain(*class_metrics))\n        headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4)\n        results_2d = itertools.zip_longest(\n            *[results_flatten[i::num_columns] for i in range(num_columns)])\n        data = [headers]\n        data += [result for result in results_2d]\n        table = AsciiTable(data)\n        print_log(\n            'Classwise Panoptic Evaluation Results:\\n' + table.table,\n            logger=logger)\n"
  },
  {
    "path": "mmdet/evaluation/metrics/crowdhuman_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport json\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\nfrom multiprocessing import Process, Queue\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.fileio import FileClient, dump, load\nfrom mmengine.logging import MMLogger\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import maximum_bipartite_matching\n\nfrom mmdet.evaluation.functional.bbox_overlaps import bbox_overlaps\nfrom mmdet.registry import METRICS\n\nPERSON_CLASSES = ['background', 'person']\n\n\n@METRICS.register_module()\nclass CrowdHumanMetric(BaseMetric):\n    \"\"\"CrowdHuman evaluation metric.\n\n    Evaluate Average Precision (AP), Miss Rate (MR) and Jaccard Index (JI)\n    for detection tasks.\n\n    Args:\n        ann_file (str): Path to the annotation file.\n        metric (str | List[str]): Metrics to be evaluated. Valid metrics\n            include 'AP', 'MR' and 'JI'. Defaults to 'AP'.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        outfile_prefix (str, optional): The prefix of json files. It includes\n            the file path and the prefix of filename, e.g., \"a/b/prefix\".\n            If not specified, a temp file will be created. Defaults to None.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. Defaults to None.\n        eval_mode (int): Select the mode of evaluate. Valid mode include\n            0(just body box), 1(just head box) and 2(both of them).\n            Defaults to 0.\n        iou_thres (float): IoU threshold. Defaults to 0.5.\n        compare_matching_method (str, optional): Matching method to compare\n            the detection results with the ground_truth when compute 'AP'\n            and 'MR'.Valid method include VOC and None(CALTECH). Default to\n            None.\n        mr_ref (str): Different parameter selection to calculate MR. Valid\n            ref include CALTECH_-2 and CALTECH_-4. 
Defaults to CALTECH_-2.\n        num_ji_process (int): The number of processes used to evaluate JI.\n            Defaults to 10.\n    \"\"\"\n    default_prefix: Optional[str] = 'crowd_human'\n\n    def __init__(self,\n                 ann_file: str,\n                 metric: Union[str, List[str]] = ['AP', 'MR', 'JI'],\n                 format_only: bool = False,\n                 outfile_prefix: Optional[str] = None,\n                 file_client_args: dict = dict(backend='disk'),\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None,\n                 eval_mode: int = 0,\n                 iou_thres: float = 0.5,\n                 compare_matching_method: Optional[str] = None,\n                 mr_ref: str = 'CALTECH_-2',\n                 num_ji_process: int = 10) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n\n        self.ann_file = ann_file\n        # crowdhuman evaluation metrics\n        self.metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['MR', 'AP', 'JI']\n        for metric in self.metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(f\"metric should be one of 'MR', 'AP', 'JI', \"\n                               f'but got {metric}.')\n\n        self.format_only = format_only\n        if self.format_only:\n            assert outfile_prefix is not None, \\\n                'outfile_prefix must not be None when format_only is True, ' \\\n                'otherwise the result files will be saved to a temp ' \\\n                'directory which will be cleaned up at the end.'\n        self.outfile_prefix = outfile_prefix\n        self.file_client_args = file_client_args\n        self.file_client = FileClient(**file_client_args)\n\n        assert eval_mode in [0, 1, 2], \\\n            \"Unknown eval mode. eval_mode should be one of 0, 1 and 2.\"\n        assert compare_matching_method is None or \\\n               compare_matching_method == 'VOC', \\\n               'The alternative compare_matching_method is VOC.' 
\\\n               'This parameter defaults to CALTECH(None)'\n        assert mr_ref == 'CALTECH_-2' or mr_ref == 'CALTECH_-4', \\\n            \"mr_ref should be one of 'CALTECH_-2', 'CALTECH_-4'.\"\n        self.eval_mode = eval_mode\n        self.iou_thres = iou_thres\n        self.compare_matching_method = compare_matching_method\n        self.mr_ref = mr_ref\n        self.num_ji_process = num_ji_process\n\n    @staticmethod\n    def results2json(results: Sequence[dict], outfile_prefix: str) -> str:\n        \"\"\"Dump the detection results to a json file.\"\"\"\n        result_file_path = f'{outfile_prefix}.json'\n        bbox_json_results = []\n        for i, result in enumerate(results):\n            ann, pred = result\n            dump_dict = dict()\n            dump_dict['ID'] = ann['ID']\n            dump_dict['width'] = ann['width']\n            dump_dict['height'] = ann['height']\n            dtboxes = []\n            bboxes = pred.tolist()\n            for _, single_bbox in enumerate(bboxes):\n                temp_dict = dict()\n                x1, y1, x2, y2, score = single_bbox\n                temp_dict['box'] = [x1, y1, x2 - x1, y2 - y1]\n                temp_dict['score'] = score\n                temp_dict['tag'] = 1\n                dtboxes.append(temp_dict)\n            dump_dict['dtboxes'] = dtboxes\n            bbox_json_results.append(dump_dict)\n        dump(bbox_json_results, result_file_path)\n        return result_file_path\n\n    def process(self, data_batch: Sequence[dict],\n                data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            ann = dict()\n            ann['ID'] = data_sample['img_id']\n            ann['width'] = data_sample['ori_shape'][1]\n            ann['height'] = data_sample['ori_shape'][0]\n            pred_bboxes = data_sample['pred_instances']['bboxes'].cpu().numpy()\n            pred_scores = data_sample['pred_instances']['scores'].cpu().numpy()\n\n            pred_bbox_scores = np.hstack(\n                [pred_bboxes, pred_scores.reshape((-1, 1))])\n\n            self.results.append((ann, pred_bbox_scores))\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            eval_results(Dict[str, float]): The computed metrics.\n            The keys are the names of the metrics, and the values\n            are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        tmp_dir = None\n        if self.outfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            outfile_prefix = osp.join(tmp_dir.name, 'result')\n        else:\n            outfile_prefix = self.outfile_prefix\n\n        # convert predictions to coco format and dump to json file\n        result_file = self.results2json(results, outfile_prefix)\n        eval_results = OrderedDict()\n        if self.format_only:\n            
logger.info(f'results are saved in {osp.dirname(outfile_prefix)}')\n            return eval_results\n\n        # load evaluation samples\n        eval_samples = self.load_eval_samples(result_file)\n\n        if 'AP' in self.metrics or 'MR' in self.metrics:\n            score_list = self.compare(eval_samples)\n            gt_num = sum([eval_samples[i].gt_num for i in eval_samples])\n            ign_num = sum([eval_samples[i].ign_num for i in eval_samples])\n            gt_num = gt_num - ign_num\n            img_num = len(eval_samples)\n\n        for metric in self.metrics:\n            logger.info(f'Evaluating {metric}...')\n            if metric == 'AP':\n                AP = self.eval_ap(score_list, gt_num, img_num)\n                eval_results['mAP'] = float(f'{round(AP, 4)}')\n            if metric == 'MR':\n                MR = self.eval_mr(score_list, gt_num, img_num)\n                eval_results['mMR'] = float(f'{round(MR, 4)}')\n            if metric == 'JI':\n                JI = self.eval_ji(eval_samples)\n                eval_results['JI'] = float(f'{round(JI, 4)}')\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n\n        return eval_results\n\n    def load_eval_samples(self, result_file):\n        \"\"\"Load data from annotations file and detection results.\n\n        Args:\n            result_file (str): The file path of the saved detection results.\n\n        Returns:\n            Dict[Image]: The detection result packaged by Image\n        \"\"\"\n        gt_str = self.file_client.get_text(self.ann_file).strip().split('\\n')\n        gt_records = [json.loads(line) for line in gt_str]\n\n        pred_records = load(result_file)\n        eval_samples = dict()\n        for gt_record, pred_record in zip(gt_records, pred_records):\n            assert gt_record['ID'] == pred_record['ID'], \\\n                'please set val_dataloader.sampler.shuffle=False and try again'\n            eval_samples[pred_record['ID']] = Image(self.eval_mode)\n            eval_samples[pred_record['ID']].load(gt_record, 'box', None,\n                                                 PERSON_CLASSES, True)\n            eval_samples[pred_record['ID']].load(pred_record, 'box', None,\n                                                 PERSON_CLASSES, False)\n            eval_samples[pred_record['ID']].clip_all_boader()\n        return eval_samples\n\n    def compare(self, samples):\n        \"\"\"Match the detection results with the ground_truth.\n\n        Args:\n            samples (dict[Image]): The detection result packaged by Image.\n\n        Returns:\n            score_list(list[tuple[ndarray, int, str]]): Matching result.\n            a list of tuples (dtbox, label, imgID) in the descending\n            sort of dtbox.score.\n        \"\"\"\n        score_list = list()\n        for id in samples:\n            if self.compare_matching_method == 'VOC':\n                result = samples[id].compare_voc(self.iou_thres)\n            else:\n                result = samples[id].compare_caltech(self.iou_thres)\n            score_list.extend(result)\n        # In the descending sort of dtbox score.\n        score_list.sort(key=lambda x: x[0][-1], reverse=True)\n        return score_list\n\n    @staticmethod\n    def eval_ap(score_list, gt_num, img_num):\n        \"\"\"Evaluate by average precision.\n\n        Args:\n            score_list(list[tuple[ndarray, int, str]]): Matching result.\n                a list of tuples (dtbox, label, imgID) in the descending\n                sort of 
dtbox.score.\n            gt_num(int): The number of gt boxes in the entire dataset.\n            img_num(int)： The number of images in the entire dataset.\n\n        Returns:\n            ap(float): result of average precision.\n        \"\"\"\n\n        # calculate general ap score\n        def _calculate_map(_recall, _precision):\n            assert len(_recall) == len(_precision)\n            area = 0\n            for k in range(1, len(_recall)):\n                delta_h = (_precision[k - 1] + _precision[k]) / 2\n                delta_w = _recall[k] - _recall[k - 1]\n                area += delta_w * delta_h\n            return area\n\n        tp, fp = 0.0, 0.0\n        rpX, rpY = list(), list()\n\n        fpn = []\n        recalln = []\n        thr = []\n        fppi = []\n        for i, item in enumerate(score_list):\n            if item[1] == 1:\n                tp += 1.0\n            elif item[1] == 0:\n                fp += 1.0\n            fn = gt_num - tp\n            recall = tp / (tp + fn)\n            precision = tp / (tp + fp)\n            rpX.append(recall)\n            rpY.append(precision)\n            fpn.append(fp)\n            recalln.append(tp)\n            thr.append(item[0][-1])\n            fppi.append(fp / img_num)\n\n        ap = _calculate_map(rpX, rpY)\n        return ap\n\n    def eval_mr(self, score_list, gt_num, img_num):\n        \"\"\"Evaluate by Caltech-style log-average miss rate.\n\n        Args:\n            score_list(list[tuple[ndarray, int, str]]): Matching result.\n                a list of tuples (dtbox, label, imgID) in the descending\n                sort of dtbox.score.\n            gt_num(int): The number of gt boxes in the entire dataset.\n            img_num(int): The number of image in the entire dataset.\n\n        Returns:\n            mr(float): result of miss rate.\n        \"\"\"\n\n        # find greater_than\n        def _find_gt(lst, target):\n            for idx, _item in enumerate(lst):\n                if _item >= target:\n                    return idx\n            return len(lst) - 1\n\n        if self.mr_ref == 'CALTECH_-2':\n            # CALTECH_MRREF_2: anchor points (from 10^-2 to 1) as in\n            # P.Dollar's paper\n            ref = [\n                0.0100, 0.0178, 0.03160, 0.0562, 0.1000, 0.1778, 0.3162,\n                0.5623, 1.000\n            ]\n        else:\n            # CALTECH_MRREF_4: anchor points (from 10^-4 to 1) as in\n            # S.Zhang's paper\n            ref = [\n                0.0001, 0.0003, 0.00100, 0.0032, 0.0100, 0.0316, 0.1000,\n                0.3162, 1.000\n            ]\n\n        tp, fp = 0.0, 0.0\n        fppiX, fppiY = list(), list()\n        for i, item in enumerate(score_list):\n            if item[1] == 1:\n                tp += 1.0\n            elif item[1] == 0:\n                fp += 1.0\n\n            fn = gt_num - tp\n            recall = tp / (tp + fn)\n            missrate = 1.0 - recall\n            fppi = fp / img_num\n            fppiX.append(fppi)\n            fppiY.append(missrate)\n\n        score = list()\n        for pos in ref:\n            argmin = _find_gt(fppiX, pos)\n            if argmin >= 0:\n                score.append(fppiY[argmin])\n        score = np.array(score)\n        mr = np.exp(np.log(score).mean())\n        return mr\n\n    def eval_ji(self, samples):\n        \"\"\"Evaluate by JI using multi_process.\n\n        Args:\n            samples(Dict[str, Image]): The detection result packaged by Image.\n\n        Returns:\n            ji(float): 
result of jaccard index.\n        \"\"\"\n        import math\n        res_line = []\n        res_ji = []\n        for i in range(10):\n            score_thr = 1e-1 * i\n            total = len(samples)\n            stride = math.ceil(total / self.num_ji_process)\n            result_queue = Queue(10000)\n            results, procs = [], []\n            records = list(samples.items())\n            for i in range(self.num_ji_process):\n                start = i * stride\n                end = np.min([start + stride, total])\n                sample_data = dict(records[start:end])\n                p = Process(\n                    target=self.compute_ji_with_ignore,\n                    args=(result_queue, sample_data, score_thr))\n                p.start()\n                procs.append(p)\n            for i in range(total):\n                t = result_queue.get()\n                results.append(t)\n            for p in procs:\n                p.join()\n            line, mean_ratio = self.gather(results)\n            line = 'score_thr:{:.1f}, {}'.format(score_thr, line)\n            res_line.append(line)\n            res_ji.append(mean_ratio)\n        return max(res_ji)\n\n    def compute_ji_with_ignore(self, result_queue, dt_result, score_thr):\n        \"\"\"Compute JI with ignore.\n\n        Args:\n            result_queue(Queue): The Queue for save compute result when\n                multi_process.\n            dt_result(dict[Image]): Detection result packaged by Image.\n            score_thr(float): The threshold of detection score.\n        Returns:\n            dict: compute result.\n        \"\"\"\n        for ID, record in dt_result.items():\n            gt_boxes = record.gt_boxes\n            dt_boxes = record.dt_boxes\n            keep = dt_boxes[:, -1] > score_thr\n            dt_boxes = dt_boxes[keep][:, :-1]\n\n            gt_tag = np.array(gt_boxes[:, -1] != -1)\n            matches = self.compute_ji_matching(dt_boxes, gt_boxes[gt_tag, :4])\n            # get the unmatched_indices\n            matched_indices = np.array([j for (j, _) in matches])\n            unmatched_indices = list(\n                set(np.arange(dt_boxes.shape[0])) - set(matched_indices))\n            num_ignore_dt = self.get_ignores(dt_boxes[unmatched_indices],\n                                             gt_boxes[~gt_tag, :4])\n            matched_indices = np.array([j for (_, j) in matches])\n            unmatched_indices = list(\n                set(np.arange(gt_boxes[gt_tag].shape[0])) -\n                set(matched_indices))\n            num_ignore_gt = self.get_ignores(\n                gt_boxes[gt_tag][unmatched_indices], gt_boxes[~gt_tag, :4])\n            # compute results\n            eps = 1e-6\n            k = len(matches)\n            m = gt_tag.sum() - num_ignore_gt\n            n = dt_boxes.shape[0] - num_ignore_dt\n            ratio = k / (m + n - k + eps)\n            recall = k / (m + eps)\n            cover = k / (n + eps)\n            noise = 1 - cover\n            result_dict = dict(\n                ratio=ratio,\n                recall=recall,\n                cover=cover,\n                noise=noise,\n                k=k,\n                m=m,\n                n=n)\n            result_queue.put_nowait(result_dict)\n\n    @staticmethod\n    def gather(results):\n        \"\"\"Integrate test results.\"\"\"\n        assert len(results)\n        img_num = 0\n        for result in results:\n            if result['n'] != 0 or result['m'] != 0:\n                img_num += 1\n        
mean_ratio = np.sum([rb['ratio'] for rb in results]) / img_num\n        valids = np.sum([rb['k'] for rb in results])\n        total = np.sum([rb['n'] for rb in results])\n        gtn = np.sum([rb['m'] for rb in results])\n        line = 'mean_ratio:{:.4f}, valids:{}, total:{}, gtn:{}'\\\n            .format(mean_ratio, valids, total, gtn)\n        return line, mean_ratio\n\n    def compute_ji_matching(self, dt_boxes, gt_boxes):\n        \"\"\"Match the annotation box for each detection box.\n\n        Args:\n            dt_boxes(ndarray): Detection boxes.\n            gt_boxes(ndarray): Ground_truth boxes.\n\n        Returns:\n            matches_(list[tuple[int, int]]): Match result.\n        \"\"\"\n        assert dt_boxes.shape[-1] > 3 and gt_boxes.shape[-1] > 3\n        if dt_boxes.shape[0] < 1 or gt_boxes.shape[0] < 1:\n            return list()\n\n        ious = bbox_overlaps(dt_boxes, gt_boxes, mode='iou')\n        input_ = copy.deepcopy(ious)\n        input_[input_ < self.iou_thres] = 0\n        match_scipy = maximum_bipartite_matching(\n            csr_matrix(input_), perm_type='column')\n        matches_ = []\n        for i in range(len(match_scipy)):\n            if match_scipy[i] != -1:\n                matches_.append((i, int(match_scipy[i])))\n        return matches_\n\n    def get_ignores(self, dt_boxes, gt_boxes):\n        \"\"\"Get the number of ignore bboxes.\"\"\"\n        if gt_boxes.size:\n            ioas = bbox_overlaps(dt_boxes, gt_boxes, mode='iof')\n            ioas = np.max(ioas, axis=1)\n            rows = np.where(ioas > self.iou_thres)[0]\n            return len(rows)\n        else:\n            return 0\n\n\nclass Image(object):\n    \"\"\"Data structure for evaluation of CrowdHuman.\n\n    Note:\n        This implementation is modified from https://github.com/Purkialo/\n        CrowdDet/blob/master/lib/evaluate/APMRToolkits/image.py\n\n    Args:\n        mode (int): Select the mode of evaluate. 
Valid mode include\n            0(just body box), 1(just head box) and 2(both of them).\n            Defaults to 0.\n    \"\"\"\n\n    def __init__(self, mode):\n        self.ID = None\n        self.width = None\n        self.height = None\n        self.dt_boxes = None\n        self.gt_boxes = None\n        self.eval_mode = mode\n\n        self.ign_num = None\n        self.gt_num = None\n        self.dt_num = None\n\n    def load(self, record, body_key, head_key, class_names, gt_flag):\n        \"\"\"Loading information for evaluation.\n\n        Args:\n            record (dict): Label information or test results.\n                The format might look something like this:\n                {\n                    'ID': '273271,c9db000d5146c15',\n                    'gtboxes': [\n                        {'fbox': [72, 202, 163, 503], 'tag': 'person', ...},\n                        {'fbox': [199, 180, 144, 499], 'tag': 'person', ...},\n                        ...\n                    ]\n                }\n                or:\n                {\n                    'ID': '273271,c9db000d5146c15',\n                    'width': 800,\n                    'height': 1067,\n                    'dtboxes': [\n                        {\n                            'box': [306.22, 205.95, 164.05, 394.04],\n                            'score': 0.99,\n                            'tag': 1\n                        },\n                        {\n                            'box': [403.60, 178.66, 157.15, 421.33],\n                            'score': 0.99,\n                            'tag': 1\n                        },\n                        ...\n                    ]\n                }\n            body_key (str, None): key of detection body box.\n                Valid when loading detection results and self.eval_mode!=1.\n            head_key (str, None): key of detection head box.\n                Valid when loading detection results and self.eval_mode!=0.\n            class_names (list[str]):class names of data set.\n                Defaults to ['background', 'person'].\n            gt_flag (bool): Indicate whether record is ground truth\n                or predicting the outcome.\n        \"\"\"\n        if 'ID' in record and self.ID is None:\n            self.ID = record['ID']\n        if 'width' in record and self.width is None:\n            self.width = record['width']\n        if 'height' in record and self.height is None:\n            self.height = record['height']\n        if gt_flag:\n            self.gt_num = len(record['gtboxes'])\n            body_bbox, head_bbox = self.load_gt_boxes(record, 'gtboxes',\n                                                      class_names)\n            if self.eval_mode == 0:\n                self.gt_boxes = body_bbox\n                self.ign_num = (body_bbox[:, -1] == -1).sum()\n            elif self.eval_mode == 1:\n                self.gt_boxes = head_bbox\n                self.ign_num = (head_bbox[:, -1] == -1).sum()\n            else:\n                gt_tag = np.array([\n                    body_bbox[i, -1] != -1 and head_bbox[i, -1] != -1\n                    for i in range(len(body_bbox))\n                ])\n                self.ign_num = (gt_tag == 0).sum()\n                self.gt_boxes = np.hstack(\n                    (body_bbox[:, :-1], head_bbox[:, :-1],\n                     gt_tag.reshape(-1, 1)))\n\n        if not gt_flag:\n            self.dt_num = len(record['dtboxes'])\n            if self.eval_mode == 0:\n                self.dt_boxes = 
self.load_det_boxes(record, 'dtboxes',\n                                                    body_key, 'score')\n            elif self.eval_mode == 1:\n                self.dt_boxes = self.load_det_boxes(record, 'dtboxes',\n                                                    head_key, 'score')\n            else:\n                body_dtboxes = self.load_det_boxes(record, 'dtboxes', body_key,\n                                                   'score')\n                head_dtboxes = self.load_det_boxes(record, 'dtboxes', head_key,\n                                                   'score')\n                self.dt_boxes = np.hstack((body_dtboxes, head_dtboxes))\n\n    @staticmethod\n    def load_gt_boxes(dict_input, key_name, class_names):\n        \"\"\"load ground_truth and transform [x, y, w, h] to [x1, y1, x2, y2]\"\"\"\n        assert key_name in dict_input\n        if len(dict_input[key_name]) < 1:\n            return np.empty([0, 5])\n        head_bbox = []\n        body_bbox = []\n        for rb in dict_input[key_name]:\n            if rb['tag'] in class_names:\n                body_tag = class_names.index(rb['tag'])\n                head_tag = copy.deepcopy(body_tag)\n            else:\n                body_tag = -1\n                head_tag = -1\n            if 'extra' in rb:\n                if 'ignore' in rb['extra']:\n                    if rb['extra']['ignore'] != 0:\n                        body_tag = -1\n                        head_tag = -1\n            if 'head_attr' in rb:\n                if 'ignore' in rb['head_attr']:\n                    if rb['head_attr']['ignore'] != 0:\n                        head_tag = -1\n            head_bbox.append(np.hstack((rb['hbox'], head_tag)))\n            body_bbox.append(np.hstack((rb['fbox'], body_tag)))\n        head_bbox = np.array(head_bbox)\n        head_bbox[:, 2:4] += head_bbox[:, :2]\n        body_bbox = np.array(body_bbox)\n        body_bbox[:, 2:4] += body_bbox[:, :2]\n        return body_bbox, head_bbox\n\n    @staticmethod\n    def load_det_boxes(dict_input, key_name, key_box, key_score, key_tag=None):\n        \"\"\"load detection boxes.\"\"\"\n        assert key_name in dict_input\n        if len(dict_input[key_name]) < 1:\n            return np.empty([0, 5])\n        else:\n            assert key_box in dict_input[key_name][0]\n            if key_score:\n                assert key_score in dict_input[key_name][0]\n            if key_tag:\n                assert key_tag in dict_input[key_name][0]\n        if key_score:\n            if key_tag:\n                bboxes = np.vstack([\n                    np.hstack((rb[key_box], rb[key_score], rb[key_tag]))\n                    for rb in dict_input[key_name]\n                ])\n            else:\n                bboxes = np.vstack([\n                    np.hstack((rb[key_box], rb[key_score]))\n                    for rb in dict_input[key_name]\n                ])\n        else:\n            if key_tag:\n                bboxes = np.vstack([\n                    np.hstack((rb[key_box], rb[key_tag]))\n                    for rb in dict_input[key_name]\n                ])\n            else:\n                bboxes = np.vstack(\n                    [rb[key_box] for rb in dict_input[key_name]])\n        bboxes[:, 2:4] += bboxes[:, :2]\n        return bboxes\n\n    def clip_all_boader(self):\n        \"\"\"Make sure boxes are within the image range.\"\"\"\n\n        def _clip_boundary(boxes, height, width):\n            assert boxes.shape[-1] >= 4\n            boxes[:, 0] = 
np.minimum(np.maximum(boxes[:, 0], 0), width - 1)\n            boxes[:, 1] = np.minimum(np.maximum(boxes[:, 1], 0), height - 1)\n            boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], width), 0)\n            boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], height), 0)\n            return boxes\n\n        assert self.dt_boxes.shape[-1] >= 4\n        assert self.gt_boxes.shape[-1] >= 4\n        assert self.width is not None and self.height is not None\n        if self.eval_mode == 2:\n            self.dt_boxes[:, :4] = _clip_boundary(self.dt_boxes[:, :4],\n                                                  self.height, self.width)\n            self.gt_boxes[:, :4] = _clip_boundary(self.gt_boxes[:, :4],\n                                                  self.height, self.width)\n            self.dt_boxes[:, 4:8] = _clip_boundary(self.dt_boxes[:, 4:8],\n                                                   self.height, self.width)\n            self.gt_boxes[:, 4:8] = _clip_boundary(self.gt_boxes[:, 4:8],\n                                                   self.height, self.width)\n        else:\n            self.dt_boxes = _clip_boundary(self.dt_boxes, self.height,\n                                           self.width)\n            self.gt_boxes = _clip_boundary(self.gt_boxes, self.height,\n                                           self.width)\n\n    def compare_voc(self, thres):\n        \"\"\"Match the detection results with the ground_truth by VOC.\n\n        Args:\n            thres (float): IOU threshold.\n\n        Returns:\n            score_list(list[tuple[ndarray, int, str]]): Matching result.\n            a list of tuples (dtbox, label, imgID) in the descending\n            sort of dtbox.score.\n        \"\"\"\n        if self.dt_boxes is None:\n            return list()\n        dtboxes = self.dt_boxes\n        gtboxes = self.gt_boxes if self.gt_boxes is not None else list()\n        dtboxes.sort(key=lambda x: x.score, reverse=True)\n        gtboxes.sort(key=lambda x: x.ign)\n\n        score_list = list()\n        for i, dt in enumerate(dtboxes):\n            maxpos = -1\n            maxiou = thres\n\n            for j, gt in enumerate(gtboxes):\n                overlap = dt.iou(gt)\n                if overlap > maxiou:\n                    maxiou = overlap\n                    maxpos = j\n\n            if maxpos >= 0:\n                if gtboxes[maxpos].ign == 0:\n                    gtboxes[maxpos].matched = 1\n                    dtboxes[i].matched = 1\n                    score_list.append((dt, self.ID))\n                else:\n                    dtboxes[i].matched = -1\n            else:\n                dtboxes[i].matched = 0\n                score_list.append((dt, self.ID))\n        return score_list\n\n    def compare_caltech(self, thres):\n        \"\"\"Match the detection results with the ground_truth by Caltech\n        matching strategy.\n\n        Args:\n            thres (float): IOU threshold.\n\n        Returns:\n            score_list(list[tuple[ndarray, int, str]]): Matching result.\n            a list of tuples (dtbox, label, imgID) in the descending\n            sort of dtbox.score.\n        \"\"\"\n        if self.dt_boxes is None or self.gt_boxes is None:\n            return list()\n\n        dtboxes = self.dt_boxes if self.dt_boxes is not None else list()\n        gtboxes = self.gt_boxes if self.gt_boxes is not None else list()\n        dt_matched = np.zeros(dtboxes.shape[0])\n        gt_matched = np.zeros(gtboxes.shape[0])\n\n        dtboxes = 
np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True))\n        gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True))\n        if len(dtboxes):\n            overlap_iou = bbox_overlaps(dtboxes, gtboxes, mode='iou')\n            overlap_ioa = bbox_overlaps(dtboxes, gtboxes, mode='iof')\n        else:\n            return list()\n\n        score_list = list()\n        for i, dt in enumerate(dtboxes):\n            maxpos = -1\n            maxiou = thres\n            for j, gt in enumerate(gtboxes):\n                if gt_matched[j] == 1:\n                    continue\n                if gt[-1] > 0:\n                    overlap = overlap_iou[i][j]\n                    if overlap > maxiou:\n                        maxiou = overlap\n                        maxpos = j\n                else:\n                    if maxpos >= 0:\n                        break\n                    else:\n                        overlap = overlap_ioa[i][j]\n                        if overlap > thres:\n                            maxiou = overlap\n                            maxpos = j\n            if maxpos >= 0:\n                if gtboxes[maxpos, -1] > 0:\n                    gt_matched[maxpos] = 1\n                    dt_matched[i] = 1\n                    score_list.append((dt, 1, self.ID))\n                else:\n                    dt_matched[i] = -1\n            else:\n                dt_matched[i] = 0\n                score_list.append((dt, 0, self.ID))\n        return score_list\n"
  },
  {
    "path": "mmdet/evaluation/metrics/dump_det_results.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import Sequence\n\nfrom mmengine.evaluator import DumpResults\nfrom mmengine.evaluator.metric import _to_cpu\n\nfrom mmdet.registry import METRICS\nfrom mmdet.structures.mask import encode_mask_results\n\n\n@METRICS.register_module()\nclass DumpDetResults(DumpResults):\n    \"\"\"Dump model predictions to a pickle file for offline evaluation.\n\n    Different from `DumpResults` in MMEngine, it compresses instance\n    segmentation masks into RLE format.\n\n    Args:\n        out_file_path (str): Path of the dumped file. Must end with '.pkl'\n            or '.pickle'.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n    \"\"\"\n\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"transfer tensors in predictions to CPU.\"\"\"\n        data_samples = _to_cpu(data_samples)\n        for data_sample in data_samples:\n            # remove gt\n            data_sample.pop('gt_instances', None)\n            data_sample.pop('ignored_instances', None)\n            data_sample.pop('gt_panoptic_seg', None)\n\n            if 'pred_instances' in data_sample:\n                pred = data_sample['pred_instances']\n                # encode mask to RLE\n                if 'masks' in pred:\n                    pred['masks'] = encode_mask_results(pred['masks'].numpy())\n            if 'pred_panoptic_seg' in data_sample:\n                warnings.warn(\n                    'Panoptic segmentation map will not be compressed. '\n                    'The dumped file will be extremely large! '\n                    'Suggest using `CocoPanopticMetric` to save the coco '\n                    'format json and segmentation png files directly.')\n        self.results.extend(data_samples)\n"
  },
  {
    "path": "mmdet/evaluation/metrics/dump_proposals_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport os.path as osp\nfrom typing import Optional, Sequence\n\nfrom mmengine.dist import is_main_process\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.fileio import dump\nfrom mmengine.logging import MMLogger\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import METRICS\n\n\n@METRICS.register_module()\nclass DumpProposals(BaseMetric):\n    \"\"\"Dump proposals pseudo metric.\n\n    Args:\n        output_dir (str): The root directory for ``proposals_file``.\n            Defaults to ''.\n        proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.\n        num_max_proposals (int, optional): Maximum number of proposals to dump.\n            If not specified, all proposals will be dumped.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. Defaults to None.\n    \"\"\"\n\n    default_prefix: Optional[str] = 'dump_proposals'\n\n    def __init__(self,\n                 output_dir: str = '',\n                 proposals_file: str = 'proposals.pkl',\n                 num_max_proposals: Optional[int] = None,\n                 file_client_args: dict = dict(backend='disk'),\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        self.num_max_proposals = num_max_proposals\n        # TODO: update after mmengine finish refactor fileio.\n        self.file_client_args = file_client_args\n        self.output_dir = output_dir\n        assert proposals_file.endswith(('.pkl', '.pickle')), \\\n            'The output file must be a pkl file.'\n\n        self.proposals_file = os.path.join(self.output_dir, proposals_file)\n        if is_main_process():\n            os.makedirs(self.output_dir, exist_ok=True)\n\n    def process(self, data_batch: Sequence[dict],\n                data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. 
The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            pred = data_sample['pred_instances']\n            # `bboxes` is sorted by `scores`\n            ranked_scores, rank_inds = pred['scores'].sort(descending=True)\n            ranked_bboxes = pred['bboxes'][rank_inds, :]\n\n            ranked_bboxes = ranked_bboxes.cpu().numpy()\n            ranked_scores = ranked_scores.cpu().numpy()\n\n            pred_instance = InstanceData()\n            pred_instance.bboxes = ranked_bboxes\n            pred_instance.scores = ranked_scores\n            if self.num_max_proposals is not None:\n                pred_instance = pred_instance[:self.num_max_proposals]\n\n            img_path = data_sample['img_path']\n            # `file_name` is the key to obtain the proposals from the\n            # `proposals_list`.\n            file_name = osp.join(\n                osp.split(osp.split(img_path)[0])[-1],\n                osp.split(img_path)[-1])\n            result = {file_name: pred_instance}\n            self.results.append(result)\n\n    def compute_metrics(self, results: list) -> dict:\n        \"\"\"Dump the processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            dict: An empty dict.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n        dump_results = {}\n        for result in results:\n            dump_results.update(result)\n        dump(\n            dump_results,\n            file=self.proposals_file,\n            file_client_args=self.file_client_args)\n        logger.info(f'Results are saved at {self.proposals_file}')\n        return {}\n"
  },
  {
    "path": "mmdet/evaluation/metrics/lvis_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport os.path as osp\nimport tempfile\nimport warnings\nfrom collections import OrderedDict\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nfrom mmengine.logging import MMLogger\nfrom terminaltables import AsciiTable\n\nfrom mmdet.registry import METRICS\nfrom mmdet.structures.mask import encode_mask_results\nfrom ..functional import eval_recalls\nfrom .coco_metric import CocoMetric\n\ntry:\n    import lvis\n    if getattr(lvis, '__version__', '0') >= '10.5.3':\n        warnings.warn(\n            'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n            UserWarning)\n    from lvis import LVIS, LVISEval, LVISResults\nexcept ImportError:\n    lvis = None\n    LVISEval = None\n    LVISResults = None\n\n\n@METRICS.register_module()\nclass LVISMetric(CocoMetric):\n    \"\"\"LVIS evaluation metric.\n\n    Args:\n        ann_file (str, optional): Path to the coco format annotation file.\n            If not specified, ground truth annotations from the dataset will\n            be converted to coco format. Defaults to None.\n        metric (str | List[str]): Metrics to be evaluated. Valid metrics\n            include 'bbox', 'segm', 'proposal', and 'proposal_fast'.\n            Defaults to 'bbox'.\n        classwise (bool): Whether to evaluate the metric class-wise.\n            Defaults to False.\n        proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.\n            Defaults to (100, 300, 1000).\n        iou_thrs (float | List[float], optional): IoU threshold to compute AP\n            and AR. If not specified, IoUs from 0.5 to 0.95 will be used.\n            Defaults to None.\n        metric_items (List[str], optional): Metric result names to be\n            recorded in the evaluation result. Defaults to None.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        outfile_prefix (str, optional): The prefix of json files. It includes\n            the file path and the prefix of filename, e.g., \"a/b/prefix\".\n            If not specified, a temp file will be created. Defaults to None.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. 
Defaults to None.\n    \"\"\"\n\n    default_prefix: Optional[str] = 'lvis'\n\n    def __init__(self,\n                 ann_file: Optional[str] = None,\n                 metric: Union[str, List[str]] = 'bbox',\n                 classwise: bool = False,\n                 proposal_nums: Sequence[int] = (100, 300, 1000),\n                 iou_thrs: Optional[Union[float, Sequence[float]]] = None,\n                 metric_items: Optional[Sequence[str]] = None,\n                 format_only: bool = False,\n                 outfile_prefix: Optional[str] = None,\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        if lvis is None:\n            raise RuntimeError(\n                'Package lvis is not installed. Please run \"pip install '\n                'git+https://github.com/lvis-dataset/lvis-api.git\".')\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        # coco evaluation metrics\n        self.metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n        for metric in self.metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(\n                    \"metric should be one of 'bbox', 'segm', 'proposal', \"\n                    f\"'proposal_fast', but got {metric}.\")\n\n        # do class wise evaluation, default False\n        self.classwise = classwise\n\n        # proposal_nums used to compute recall or precision.\n        self.proposal_nums = list(proposal_nums)\n\n        # iou_thrs used to compute recall or precision.\n        if iou_thrs is None:\n            iou_thrs = np.linspace(\n                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n        self.iou_thrs = iou_thrs\n        self.metric_items = metric_items\n        self.format_only = format_only\n        if self.format_only:\n            assert outfile_prefix is not None, \\\n                'outfile_prefix must not be None when format_only is True, ' \\\n                'otherwise the result files will be saved to a temp ' \\\n                'directory which will be cleaned up at the end.'\n\n        self.outfile_prefix = outfile_prefix\n\n        # if ann_file is not specified,\n        # initialize lvis api with the converted dataset\n        self._lvis_api = LVIS(ann_file) if ann_file else None\n\n        # handle dataset lazy init\n        self.cat_ids = None\n        self.img_ids = None\n\n    def fast_eval_recall(self,\n                         results: List[dict],\n                         proposal_nums: Sequence[int],\n                         iou_thrs: Sequence[float],\n                         logger: Optional[MMLogger] = None) -> np.ndarray:\n        \"\"\"Evaluate proposal recall with LVIS's fast_eval_recall.\n\n        Args:\n            results (List[dict]): Results of the dataset.\n            proposal_nums (Sequence[int]): Proposal numbers used for\n                evaluation.\n            iou_thrs (Sequence[float]): IoU thresholds used for evaluation.\n            logger (MMLogger, optional): Logger used for logging the recall\n                summary.\n        Returns:\n            np.ndarray: Averaged recall results.\n        \"\"\"\n        gt_bboxes = []\n        pred_bboxes = [result['bboxes'] for result in results]\n        for i in range(len(self.img_ids)):\n            ann_ids = self._lvis_api.get_ann_ids(img_ids=[self.img_ids[i]])\n            ann_info = self._lvis_api.load_anns(ann_ids)\n            if len(ann_info) 
== 0:\n                gt_bboxes.append(np.zeros((0, 4)))\n                continue\n            bboxes = []\n            for ann in ann_info:\n                x1, y1, w, h = ann['bbox']\n                bboxes.append([x1, y1, x1 + w, y1 + h])\n            bboxes = np.array(bboxes, dtype=np.float32)\n            if bboxes.shape[0] == 0:\n                bboxes = np.zeros((0, 4))\n            gt_bboxes.append(bboxes)\n\n        recalls = eval_recalls(\n            gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)\n        ar = recalls.mean(axis=1)\n        return ar\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            result = dict()\n            pred = data_sample['pred_instances']\n            result['img_id'] = data_sample['img_id']\n            result['bboxes'] = pred['bboxes'].cpu().numpy()\n            result['scores'] = pred['scores'].cpu().numpy()\n            result['labels'] = pred['labels'].cpu().numpy()\n            # encode mask to RLE\n            if 'masks' in pred:\n                result['masks'] = encode_mask_results(\n                    pred['masks'].detach().cpu().numpy())\n            # some detectors use different scores for bbox and mask\n            if 'mask_scores' in pred:\n                result['mask_scores'] = pred['mask_scores'].cpu().numpy()\n\n            # parse gt\n            gt = dict()\n            gt['width'] = data_sample['ori_shape'][1]\n            gt['height'] = data_sample['ori_shape'][0]\n            gt['img_id'] = data_sample['img_id']\n            if self._lvis_api is None:\n                # TODO: Need to refactor to support LoadAnnotations\n                assert 'instances' in data_sample, \\\n                    'ground truth is required for evaluation when ' \\\n                    '`ann_file` is not provided'\n                gt['anns'] = data_sample['instances']\n            # add converted result to the results list\n            self.results.append((gt, result))\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            Dict[str, float]: The computed metrics. 
The keys are the names of\n            the metrics, and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        # split gt and prediction list\n        gts, preds = zip(*results)\n\n        tmp_dir = None\n        if self.outfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            outfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            outfile_prefix = self.outfile_prefix\n\n        if self._lvis_api is None:\n            # use converted gt json file to initialize coco api\n            logger.info('Converting ground truth to coco format...')\n            coco_json_path = self.gt_to_coco_json(\n                gt_dicts=gts, outfile_prefix=outfile_prefix)\n            self._lvis_api = LVIS(coco_json_path)\n\n        # handle lazy init\n        if self.cat_ids is None:\n            self.cat_ids = self._lvis_api.get_cat_ids()\n        if self.img_ids is None:\n            self.img_ids = self._lvis_api.get_img_ids()\n\n        # convert predictions to coco format and dump to json file\n        result_files = self.results2json(preds, outfile_prefix)\n\n        eval_results = OrderedDict()\n        if self.format_only:\n            logger.info('results are saved in '\n                        f'{osp.dirname(outfile_prefix)}')\n            return eval_results\n\n        lvis_gt = self._lvis_api\n\n        for metric in self.metrics:\n            logger.info(f'Evaluating {metric}...')\n\n            # TODO: May refactor fast_eval_recall to an independent metric?\n            # fast eval recall\n            if metric == 'proposal_fast':\n                ar = self.fast_eval_recall(\n                    preds, self.proposal_nums, self.iou_thrs, logger=logger)\n                log_msg = []\n                for i, num in enumerate(self.proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n                    log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n                log_msg = ''.join(log_msg)\n                logger.info(log_msg)\n                continue\n\n            try:\n                lvis_dt = LVISResults(lvis_gt, result_files[metric])\n            except IndexError:\n                logger.info(\n                    'The testing results of the whole dataset is empty.')\n                break\n\n            iou_type = 'bbox' if metric == 'proposal' else metric\n            lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)\n            lvis_eval.params.imgIds = self.img_ids\n            metric_items = self.metric_items\n            if metric == 'proposal':\n                lvis_eval.params.useCats = 0\n                lvis_eval.params.maxDets = list(self.proposal_nums)\n                lvis_eval.evaluate()\n                lvis_eval.accumulate()\n                lvis_eval.summarize()\n                if metric_items is None:\n                    metric_items = ['AR@300', 'ARs@300', 'ARm@300', 'ARl@300']\n                for k, v in lvis_eval.get_results().items():\n                    if k in metric_items:\n                        val = float('{:.3f}'.format(float(v)))\n                        eval_results[k] = val\n\n            else:\n                lvis_eval.evaluate()\n                lvis_eval.accumulate()\n                lvis_eval.summarize()\n                lvis_results = lvis_eval.get_results()\n                if self.classwise:  # Compute per-category AP\n                    # Compute per-category AP\n                    # from 
https://github.com/facebookresearch/detectron2/\n                    precisions = lvis_eval.eval['precision']\n                    # precision: (iou, recall, cls, area range, max dets)\n                    assert len(self.cat_ids) == precisions.shape[2]\n\n                    results_per_category = []\n                    for idx, catId in enumerate(self.cat_ids):\n                        # area range index 0: all area ranges\n                        # max dets index -1: typically 100 per image\n                        # the dimensions of precisions are\n                        # [num_thrs, num_recalls, num_cats, num_area_rngs]\n                        nm = self._lvis_api.load_cats([catId])[0]\n                        precision = precisions[:, :, idx, 0]\n                        precision = precision[precision > -1]\n                        if precision.size:\n                            ap = np.mean(precision)\n                        else:\n                            ap = float('nan')\n                        results_per_category.append(\n                            (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n                        eval_results[f'{nm[\"name\"]}_precision'] = round(ap, 3)\n\n                    num_columns = min(6, len(results_per_category) * 2)\n                    results_flatten = list(\n                        itertools.chain(*results_per_category))\n                    headers = ['category', 'AP'] * (num_columns // 2)\n                    results_2d = itertools.zip_longest(*[\n                        results_flatten[i::num_columns]\n                        for i in range(num_columns)\n                    ])\n                    table_data = [headers]\n                    table_data += [result for result in results_2d]\n                    table = AsciiTable(table_data)\n                    logger.info('\\n' + table.table)\n\n                if metric_items is None:\n                    metric_items = [\n                        'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr',\n                        'APc', 'APf'\n                    ]\n\n                for k, v in lvis_results.items():\n                    if k in metric_items:\n                        key = '{}_{}'.format(metric, k)\n                        val = float('{:.3f}'.format(float(v)))\n                        eval_results[key] = val\n\n            lvis_eval.print_results()\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n"
  },
  {
    "path": "mmdet/evaluation/metrics/openimages_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom collections import OrderedDict\nfrom typing import List, Optional, Sequence, Union\n\nimport numpy as np\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.logging import MMLogger, print_log\n\nfrom mmdet.registry import METRICS\nfrom ..functional import eval_map\n\n\n@METRICS.register_module()\nclass OpenImagesMetric(BaseMetric):\n    \"\"\"OpenImages evaluation metric.\n\n    Evaluate detection mAP for OpenImages. Please refer to\n    https://storage.googleapis.com/openimages/web/evaluation.html for more\n    details.\n\n    Args:\n        iou_thrs (float or List[float]): IoU threshold. Defaults to 0.5.\n        ioa_thrs (float or List[float]): IoA threshold. Defaults to 0.5.\n        scale_ranges (List[tuple], optional): Scale ranges for evaluating\n            mAP. If not specified, all bounding boxes would be included in\n            evaluation. Defaults to None\n        use_group_of (bool): Whether consider group of groud truth bboxes\n            during evaluating. Defaults to True.\n        get_supercategory (bool): Whether to get parent class of the\n            current class. Default: True.\n        filter_labels (bool): Whether filter unannotated classes.\n            Default: True.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. Defaults to None.\n    \"\"\"\n    default_prefix: Optional[str] = 'openimages'\n\n    def __init__(self,\n                 iou_thrs: Union[float, List[float]] = 0.5,\n                 ioa_thrs: Union[float, List[float]] = 0.5,\n                 scale_ranges: Optional[List[tuple]] = None,\n                 use_group_of: bool = True,\n                 get_supercategory: bool = True,\n                 filter_labels: bool = True,\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        self.iou_thrs = [iou_thrs] if isinstance(iou_thrs, float) else iou_thrs\n        self.ioa_thrs = [ioa_thrs] if (isinstance(ioa_thrs, float)\n                                       or ioa_thrs is None) else ioa_thrs\n        assert isinstance(self.iou_thrs, list) and isinstance(\n            self.ioa_thrs, list)\n        assert len(self.iou_thrs) == len(self.ioa_thrs)\n\n        self.scale_ranges = scale_ranges\n        self.use_group_of = use_group_of\n        self.get_supercategory = get_supercategory\n        self.filter_labels = filter_labels\n\n    def _get_supercategory_ann(self, instances: List[dict]) -> List[dict]:\n        \"\"\"Get parent classes's annotation of the corresponding class.\n\n        Args:\n            instances (List[dict]): A list of annotations of the instances.\n\n        Returns:\n            List[dict]: Annotations extended with super-category.\n        \"\"\"\n        supercat_instances = []\n        relation_matrix = self.dataset_meta['RELATION_MATRIX']\n        for instance in instances:\n            labels = np.where(relation_matrix[instance['bbox_label']])[0]\n            for label in labels:\n                if label == 
instance['bbox_label']:\n                    continue\n                new_instance = copy.deepcopy(instance)\n                new_instance['bbox_label'] = label\n                supercat_instances.append(new_instance)\n        return supercat_instances\n\n    def _process_predictions(self, pred_bboxes: np.ndarray,\n                             pred_scores: np.ndarray, pred_labels: np.ndarray,\n                             gt_instances: list,\n                             image_level_labels: np.ndarray) -> tuple:\n        \"\"\"Process results of the corresponding class of the detection bboxes.\n\n        Note: It will choose to do the following two processing according to\n        the parameters:\n\n        1. Whether to add parent classes of the corresponding class of the\n        detection bboxes.\n\n        2. Whether to ignore the classes that unannotated on that image.\n\n        Args:\n            pred_bboxes (np.ndarray): bboxes predicted by the model\n            pred_scores (np.ndarray): scores predicted by the model\n            pred_labels (np.ndarray): labels predicted by the model\n            gt_instances (list): ground truth annotations\n            image_level_labels (np.ndarray): human-verified image level labels\n\n        Returns:\n            tuple: Processed bboxes, scores, and labels.\n        \"\"\"\n        processed_bboxes = copy.deepcopy(pred_bboxes)\n        processed_scores = copy.deepcopy(pred_scores)\n        processed_labels = copy.deepcopy(pred_labels)\n        gt_labels = np.array([ins['bbox_label'] for ins in gt_instances],\n                             dtype=np.int64)\n        if image_level_labels is not None:\n            allowed_classes = np.unique(\n                np.append(gt_labels, image_level_labels))\n        else:\n            allowed_classes = np.unique(gt_labels)\n        relation_matrix = self.dataset_meta['RELATION_MATRIX']\n        pred_classes = np.unique(pred_labels)\n        for pred_class in pred_classes:\n            classes = np.where(relation_matrix[pred_class])[0]\n            for cls in classes:\n                if (cls in allowed_classes and cls != pred_class\n                        and self.get_supercategory):\n                    # add super-supercategory preds\n                    index = np.where(pred_labels == pred_class)[0]\n                    processed_scores = np.concatenate(\n                        [processed_scores, pred_scores[index]])\n                    processed_bboxes = np.concatenate(\n                        [processed_bboxes, pred_bboxes[index]])\n                    extend_labels = np.full(index.shape, cls, dtype=np.int64)\n                    processed_labels = np.concatenate(\n                        [processed_labels, extend_labels])\n                elif cls not in allowed_classes and self.filter_labels:\n                    # remove unannotated preds\n                    index = np.where(processed_labels != cls)[0]\n                    processed_scores = processed_scores[index]\n                    processed_bboxes = processed_bboxes[index]\n                    processed_labels = processed_labels[index]\n        return processed_bboxes, processed_scores, processed_labels\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. 
The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            gt = copy.deepcopy(data_sample)\n            # add super-category instances\n            # TODO: Need to refactor to support LoadAnnotations\n            instances = gt['instances']\n            if self.get_supercategory:\n                supercat_instances = self._get_supercategory_ann(instances)\n                instances.extend(supercat_instances)\n            gt_labels = []\n            gt_bboxes = []\n            is_group_ofs = []\n            for ins in instances:\n                gt_labels.append(ins['bbox_label'])\n                gt_bboxes.append(ins['bbox'])\n                is_group_ofs.append(ins['is_group_of'])\n            ann = dict(\n                labels=np.array(gt_labels, dtype=np.int64),\n                bboxes=np.array(gt_bboxes, dtype=np.float32).reshape((-1, 4)),\n                gt_is_group_ofs=np.array(is_group_ofs, dtype=bool))\n\n            image_level_labels = gt.get('image_level_labels', None)\n            pred = data_sample['pred_instances']\n            pred_bboxes = pred['bboxes'].cpu().numpy()\n            pred_scores = pred['scores'].cpu().numpy()\n            pred_labels = pred['labels'].cpu().numpy()\n\n            pred_bboxes, pred_scores, pred_labels = self._process_predictions(\n                pred_bboxes, pred_scores, pred_labels, instances,\n                image_level_labels)\n\n            dets = []\n            for label in range(len(self.dataset_meta['classes'])):\n                index = np.where(pred_labels == label)[0]\n                pred_bbox_scores = np.hstack(\n                    [pred_bboxes[index], pred_scores[index].reshape((-1, 1))])\n                dets.append(pred_bbox_scores)\n            self.results.append((ann, dets))\n\n    def compute_metrics(self, results: list) -> dict:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            dict: The computed metrics. The keys are the names of the metrics,\n            and the values are corresponding results.\n        \"\"\"\n        logger = MMLogger.get_current_instance()\n        gts, preds = zip(*results)\n        eval_results = OrderedDict()\n        # get dataset type\n        dataset_type = self.dataset_meta.get('dataset_type')\n        if dataset_type not in ['oid_challenge', 'oid_v6']:\n            dataset_type = 'oid_v6'\n            print_log(\n                'Cannot infer dataset type from the length of the'\n                ' classes. 
Set `oid_v6` as dataset type.',\n                logger='current')\n        mean_aps = []\n        for i, (iou_thr,\n                ioa_thr) in enumerate(zip(self.iou_thrs, self.ioa_thrs)):\n            if self.use_group_of:\n                assert ioa_thr is not None, 'ioa_thr must have value when' \\\n                                            ' using group_of in evaluation.'\n            print_log(f'\\n{\"-\" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}'\n                      f'{\"-\" * 15}')\n            mean_ap, _ = eval_map(\n                preds,\n                gts,\n                scale_ranges=self.scale_ranges,\n                iou_thr=iou_thr,\n                ioa_thr=ioa_thr,\n                dataset=dataset_type,\n                logger=logger,\n                use_group_of=self.use_group_of)\n\n            mean_aps.append(mean_ap)\n            eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)\n        eval_results['mAP'] = sum(mean_aps) / len(mean_aps)\n        return eval_results\n"
  },
  {
    "path": "mmdet/evaluation/metrics/voc_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom collections import OrderedDict\nfrom typing import List, Optional, Sequence, Union\n\nimport numpy as np\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.logging import MMLogger\n\nfrom mmdet.registry import METRICS\nfrom ..functional import eval_map, eval_recalls\n\n\n@METRICS.register_module()\nclass VOCMetric(BaseMetric):\n    \"\"\"Pascal VOC evaluation metric.\n\n    Args:\n        iou_thrs (float or List[float]): IoU threshold. Defaults to 0.5.\n        scale_ranges (List[tuple], optional): Scale ranges for evaluating\n            mAP. If not specified, all bounding boxes would be included in\n            evaluation. Defaults to None.\n        metric (str | list[str]): Metrics to be evaluated. Options are\n            'mAP', 'recall'. If is list, the first setting in the list will\n             be used to evaluate metric.\n        proposal_nums (Sequence[int]): Proposal number used for evaluating\n            recalls, such as recall@100, recall@1000.\n            Default: (100, 300, 1000).\n        eval_mode (str): 'area' or '11points', 'area' means calculating the\n            area under precision-recall curve, '11points' means calculating\n            the average precision of recalls at [0, 0.1, ..., 1].\n            The PASCAL VOC2007 defaults to use '11points', while PASCAL\n            VOC2012 defaults to use 'area'.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. Defaults to None.\n    \"\"\"\n\n    default_prefix: Optional[str] = 'pascal_voc'\n\n    def __init__(self,\n                 iou_thrs: Union[float, List[float]] = 0.5,\n                 scale_ranges: Optional[List[tuple]] = None,\n                 metric: Union[str, List[str]] = 'mAP',\n                 proposal_nums: Sequence[int] = (100, 300, 1000),\n                 eval_mode: str = '11points',\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        self.iou_thrs = [iou_thrs] if isinstance(iou_thrs, float) \\\n            else iou_thrs\n        self.scale_ranges = scale_ranges\n        # voc evaluation metrics\n        if not isinstance(metric, str):\n            assert len(metric) == 1\n            metric = metric[0]\n        allowed_metrics = ['recall', 'mAP']\n        if metric not in allowed_metrics:\n            raise KeyError(\n                f\"metric should be one of 'recall', 'mAP', but got {metric}.\")\n        self.metric = metric\n        self.proposal_nums = proposal_nums\n        assert eval_mode in ['area', '11points'], \\\n            'Unrecognized mode, only \"area\" and \"11points\" are supported'\n        self.eval_mode = eval_mode\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. 
The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            gt = copy.deepcopy(data_sample)\n            # TODO: Need to refactor to support LoadAnnotations\n            gt_instances = gt['gt_instances']\n            gt_ignore_instances = gt['ignored_instances']\n            ann = dict(\n                labels=gt_instances['labels'].cpu().numpy(),\n                bboxes=gt_instances['bboxes'].cpu().numpy(),\n                bboxes_ignore=gt_ignore_instances['bboxes'].cpu().numpy(),\n                labels_ignore=gt_ignore_instances['labels'].cpu().numpy())\n\n            pred = data_sample['pred_instances']\n            pred_bboxes = pred['bboxes'].cpu().numpy()\n            pred_scores = pred['scores'].cpu().numpy()\n            pred_labels = pred['labels'].cpu().numpy()\n\n            dets = []\n            for label in range(len(self.dataset_meta['classes'])):\n                index = np.where(pred_labels == label)[0]\n                pred_bbox_scores = np.hstack(\n                    [pred_bboxes[index], pred_scores[index].reshape((-1, 1))])\n                dets.append(pred_bbox_scores)\n\n            self.results.append((ann, dets))\n\n    def compute_metrics(self, results: list) -> dict:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            dict: The computed metrics. 
The keys are the names of the metrics,\n            and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n        gts, preds = zip(*results)\n        eval_results = OrderedDict()\n        if self.metric == 'mAP':\n            assert isinstance(self.iou_thrs, list)\n            dataset_type = self.dataset_meta.get('dataset_type')\n            if dataset_type in ['VOC2007', 'VOC2012']:\n                dataset_name = 'voc'\n                if dataset_type == 'VOC2007' and self.eval_mode != '11points':\n                    warnings.warn('Pascal VOC2007 uses `11points` as default '\n                                  'evaluate mode, but you are using '\n                                  f'{self.eval_mode}.')\n                elif dataset_type == 'VOC2012' and self.eval_mode != 'area':\n                    warnings.warn('Pascal VOC2012 uses `area` as default '\n                                  'evaluate mode, but you are using '\n                                  f'{self.eval_mode}.')\n            else:\n                dataset_name = self.dataset_meta['classes']\n\n            mean_aps = []\n            for iou_thr in self.iou_thrs:\n                logger.info(f'\\n{\"-\" * 15}iou_thr: {iou_thr}{\"-\" * 15}')\n                # Follow the official implementation,\n                # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar\n                # we should use the legacy coordinate system in mmdet 1.x,\n                # which means w, h should be computed as 'x2 - x1 + 1` and\n                # `y2 - y1 + 1`\n                mean_ap, _ = eval_map(\n                    preds,\n                    gts,\n                    scale_ranges=self.scale_ranges,\n                    iou_thr=iou_thr,\n                    dataset=dataset_name,\n                    logger=logger,\n                    eval_mode=self.eval_mode,\n                    use_legacy_coordinate=True)\n                mean_aps.append(mean_ap)\n                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)\n            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)\n            eval_results.move_to_end('mAP', last=False)\n        elif self.metric == 'recall':\n            # TODO: Currently not checked.\n            gt_bboxes = [ann['bboxes'] for ann in self.annotations]\n            recalls = eval_recalls(\n                gt_bboxes,\n                results,\n                self.proposal_nums,\n                self.iou_thrs,\n                logger=logger,\n                use_legacy_coordinate=True)\n            for i, num in enumerate(self.proposal_nums):\n                for j, iou_thr in enumerate(self.iou_thrs):\n                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]\n            if recalls.shape[1] > 1:\n                ar = recalls.mean(axis=1)\n                for i, num in enumerate(self.proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n        return eval_results\n"
  },
  {
    "path": "mmdet/models/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .backbones import *  # noqa: F401,F403\nfrom .data_preprocessors import *  # noqa: F401,F403\nfrom .dense_heads import *  # noqa: F401,F403\nfrom .detectors import *  # noqa: F401,F403\nfrom .layers import *  # noqa: F401,F403\nfrom .losses import *  # noqa: F401,F403\nfrom .necks import *  # noqa: F401,F403\nfrom .roi_heads import *  # noqa: F401,F403\nfrom .seg_heads import *  # noqa: F401,F403\nfrom .task_modules import *  # noqa: F401,F403\nfrom .test_time_augs import *  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/models/backbones/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .csp_darknet import CSPDarknet\nfrom .cspnext import CSPNeXt\nfrom .darknet import Darknet\nfrom .detectors_resnet import DetectoRS_ResNet\nfrom .detectors_resnext import DetectoRS_ResNeXt\nfrom .efficientnet import EfficientNet\nfrom .hourglass import HourglassNet\nfrom .hrnet import HRNet\nfrom .mobilenet_v2 import MobileNetV2\nfrom .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2\nfrom .regnet import RegNet\nfrom .res2net import Res2Net\nfrom .resnest import ResNeSt\nfrom .resnet import ResNet, ResNetV1d\nfrom .resnext import ResNeXt\nfrom .ssd_vgg import SSDVGG\nfrom .swin import SwinTransformer\nfrom .trident_resnet import TridentResNet\n\n__all__ = [\n    'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',\n    'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',\n    'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',\n    'SwinTransformer', 'PyramidVisionTransformer',\n    'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt'\n]\n"
  },
  {
    "path": "mmdet/models/backbones/csp_darknet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom ..layers import CSPLayer\n\n\nclass Focus(nn.Module):\n    \"\"\"Focus width and height information into channel space.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        kernel_size (int): The kernel size of the convolution. Default: 1\n        stride (int): The stride of the convolution. Default: 1\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish').\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=1,\n                 stride=1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish')):\n        super().__init__()\n        self.conv = ConvModule(\n            in_channels * 4,\n            out_channels,\n            kernel_size,\n            stride,\n            padding=(kernel_size - 1) // 2,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def forward(self, x):\n        # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)\n        patch_top_left = x[..., ::2, ::2]\n        patch_top_right = x[..., ::2, 1::2]\n        patch_bot_left = x[..., 1::2, ::2]\n        patch_bot_right = x[..., 1::2, 1::2]\n        x = torch.cat(\n            (\n                patch_top_left,\n                patch_bot_left,\n                patch_top_right,\n                patch_bot_right,\n            ),\n            dim=1,\n        )\n        return self.conv(x)\n\n\nclass SPPBottleneck(BaseModule):\n    \"\"\"Spatial pyramid pooling layer used in YOLOv3-SPP.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling\n            layers. Default: (5, 9, 13).\n        conv_cfg (dict): Config dict for convolution layer. 
Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish').\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_sizes=(5, 9, 13),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n        mid_channels = in_channels // 2\n        self.conv1 = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.poolings = nn.ModuleList([\n            nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)\n            for ks in kernel_sizes\n        ])\n        conv2_channels = mid_channels * (len(kernel_sizes) + 1)\n        self.conv2 = ConvModule(\n            conv2_channels,\n            out_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        with torch.cuda.amp.autocast(enabled=False):\n            x = torch.cat(\n                [x] + [pooling(x) for pooling in self.poolings], dim=1)\n        x = self.conv2(x)\n        return x\n\n\n@MODELS.register_module()\nclass CSPDarknet(BaseModule):\n    \"\"\"CSP-Darknet backbone used in YOLOv5 and YOLOX.\n\n    Args:\n        arch (str): Architecture of CSP-Darknet, from {P5, P6}.\n            Default: P5.\n        deepen_factor (float): Depth multiplier, multiply number of\n            blocks in CSP layer by this amount. Default: 1.0.\n        widen_factor (float): Width multiplier, multiply number of\n            channels in each layer by this amount. Default: 1.0.\n        out_indices (Sequence[int]): Output from which stages.\n            Default: (2, 3, 4).\n        frozen_stages (int): Stages to be frozen (stop grad and set eval\n            mode). -1 means not freezing any parameters. Default: -1.\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Default: False.\n        arch_ovewrite(list): Overwrite default arch settings. Default: None.\n        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP\n            layers. Default: (5, 9, 13).\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True).\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). 
Note: Effect on Batch Norm\n            and its variants only.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    Example:\n        >>> from mmdet.models import CSPDarknet\n        >>> import torch\n        >>> self = CSPDarknet(depth=53)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 416, 416)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        ...\n        (1, 256, 52, 52)\n        (1, 512, 26, 26)\n        (1, 1024, 13, 13)\n    \"\"\"\n    # From left to right:\n    # in_channels, out_channels, num_blocks, add_identity, use_spp\n    arch_settings = {\n        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],\n               [256, 512, 9, True, False], [512, 1024, 3, False, True]],\n        'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],\n               [256, 512, 9, True, False], [512, 768, 3, True, False],\n               [768, 1024, 3, False, True]]\n    }\n\n    def __init__(self,\n                 arch='P5',\n                 deepen_factor=1.0,\n                 widen_factor=1.0,\n                 out_indices=(2, 3, 4),\n                 frozen_stages=-1,\n                 use_depthwise=False,\n                 arch_ovewrite=None,\n                 spp_kernal_sizes=(5, 9, 13),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 norm_eval=False,\n                 init_cfg=dict(\n                     type='Kaiming',\n                     layer='Conv2d',\n                     a=math.sqrt(5),\n                     distribution='uniform',\n                     mode='fan_in',\n                     nonlinearity='leaky_relu')):\n        super().__init__(init_cfg)\n        arch_setting = self.arch_settings[arch]\n        if arch_ovewrite:\n            arch_setting = arch_ovewrite\n        assert set(out_indices).issubset(\n            i for i in range(len(arch_setting) + 1))\n        if frozen_stages not in range(-1, len(arch_setting) + 1):\n            raise ValueError('frozen_stages must be in range(-1, '\n                             'len(arch_setting) + 1). 
But received '\n                             f'{frozen_stages}')\n\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.use_depthwise = use_depthwise\n        self.norm_eval = norm_eval\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n\n        self.stem = Focus(\n            3,\n            int(arch_setting[0][0] * widen_factor),\n            kernel_size=3,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.layers = ['stem']\n\n        for i, (in_channels, out_channels, num_blocks, add_identity,\n                use_spp) in enumerate(arch_setting):\n            in_channels = int(in_channels * widen_factor)\n            out_channels = int(out_channels * widen_factor)\n            num_blocks = max(round(num_blocks * deepen_factor), 1)\n            stage = []\n            conv_layer = conv(\n                in_channels,\n                out_channels,\n                3,\n                stride=2,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(conv_layer)\n            if use_spp:\n                spp = SPPBottleneck(\n                    out_channels,\n                    out_channels,\n                    kernel_sizes=spp_kernal_sizes,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg)\n                stage.append(spp)\n            csp_layer = CSPLayer(\n                out_channels,\n                out_channels,\n                num_blocks=num_blocks,\n                add_identity=add_identity,\n                use_depthwise=use_depthwise,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(csp_layer)\n            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))\n            self.layers.append(f'stage{i + 1}')\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for i in range(self.frozen_stages + 1):\n                m = getattr(self, self.layers[i])\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def train(self, mode=True):\n        super(CSPDarknet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n    def forward(self, x):\n        outs = []\n        for i, layer_name in enumerate(self.layers):\n            layer = getattr(self, layer_name)\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/cspnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Sequence, Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom ..layers import CSPLayer\nfrom .csp_darknet import SPPBottleneck\n\n\n@MODELS.register_module()\nclass CSPNeXt(BaseModule):\n    \"\"\"CSPNeXt backbone used in RTMDet.\n\n    Args:\n        arch (str): Architecture of CSPNeXt, from {P5, P6}.\n            Defaults to P5.\n        expand_ratio (float): Ratio to adjust the number of channels of the\n            hidden layer. Defaults to 0.5.\n        deepen_factor (float): Depth multiplier, multiply number of\n            blocks in CSP layer by this amount. Defaults to 1.0.\n        widen_factor (float): Width multiplier, multiply number of\n            channels in each layer by this amount. Defaults to 1.0.\n        out_indices (Sequence[int]): Output from which stages.\n            Defaults to (2, 3, 4).\n        frozen_stages (int): Stages to be frozen (stop grad and set eval\n            mode). -1 means not freezing any parameters. Defaults to -1.\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Defaults to False.\n        arch_ovewrite (list): Overwrite default arch settings.\n            Defaults to None.\n        spp_kernel_sizes: (tuple[int]): Sequential of kernel sizes of SPP\n            layers. Defaults to (5, 9, 13).\n        channel_attention (bool): Whether to add channel attention in each\n            stage. Defaults to True.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and\n            config norm layer. Defaults to dict(type='BN', requires_grad=True).\n        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.\n            Defaults to dict(type='SiLU').\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). 
Note: Effect on Batch Norm\n            and its variants only.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict.\n    \"\"\"\n    # From left to right:\n    # in_channels, out_channels, num_blocks, add_identity, use_spp\n    arch_settings = {\n        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],\n               [256, 512, 6, True, False], [512, 1024, 3, False, True]],\n        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],\n               [256, 512, 6, True, False], [512, 768, 3, True, False],\n               [768, 1024, 3, False, True]]\n    }\n\n    def __init__(\n        self,\n        arch: str = 'P5',\n        deepen_factor: float = 1.0,\n        widen_factor: float = 1.0,\n        out_indices: Sequence[int] = (2, 3, 4),\n        frozen_stages: int = -1,\n        use_depthwise: bool = False,\n        expand_ratio: float = 0.5,\n        arch_ovewrite: dict = None,\n        spp_kernel_sizes: Sequence[int] = (5, 9, 13),\n        channel_attention: bool = True,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),\n        act_cfg: ConfigType = dict(type='SiLU'),\n        norm_eval: bool = False,\n        init_cfg: OptMultiConfig = dict(\n            type='Kaiming',\n            layer='Conv2d',\n            a=math.sqrt(5),\n            distribution='uniform',\n            mode='fan_in',\n            nonlinearity='leaky_relu')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        arch_setting = self.arch_settings[arch]\n        if arch_ovewrite:\n            arch_setting = arch_ovewrite\n        assert set(out_indices).issubset(\n            i for i in range(len(arch_setting) + 1))\n        if frozen_stages not in range(-1, len(arch_setting) + 1):\n            raise ValueError('frozen_stages must be in range(-1, '\n                             'len(arch_setting) + 1). 
But received '\n                             f'{frozen_stages}')\n\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.use_depthwise = use_depthwise\n        self.norm_eval = norm_eval\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n        self.stem = nn.Sequential(\n            ConvModule(\n                3,\n                int(arch_setting[0][0] * widen_factor // 2),\n                3,\n                padding=1,\n                stride=2,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg),\n            ConvModule(\n                int(arch_setting[0][0] * widen_factor // 2),\n                int(arch_setting[0][0] * widen_factor // 2),\n                3,\n                padding=1,\n                stride=1,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg),\n            ConvModule(\n                int(arch_setting[0][0] * widen_factor // 2),\n                int(arch_setting[0][0] * widen_factor),\n                3,\n                padding=1,\n                stride=1,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg))\n        self.layers = ['stem']\n\n        for i, (in_channels, out_channels, num_blocks, add_identity,\n                use_spp) in enumerate(arch_setting):\n            in_channels = int(in_channels * widen_factor)\n            out_channels = int(out_channels * widen_factor)\n            num_blocks = max(round(num_blocks * deepen_factor), 1)\n            stage = []\n            conv_layer = conv(\n                in_channels,\n                out_channels,\n                3,\n                stride=2,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(conv_layer)\n            if use_spp:\n                spp = SPPBottleneck(\n                    out_channels,\n                    out_channels,\n                    kernel_sizes=spp_kernel_sizes,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg)\n                stage.append(spp)\n            csp_layer = CSPLayer(\n                out_channels,\n                out_channels,\n                num_blocks=num_blocks,\n                add_identity=add_identity,\n                use_depthwise=use_depthwise,\n                use_cspnext_block=True,\n                expand_ratio=expand_ratio,\n                channel_attention=channel_attention,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(csp_layer)\n            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))\n            self.layers.append(f'stage{i + 1}')\n\n    def _freeze_stages(self) -> None:\n        if self.frozen_stages >= 0:\n            for i in range(self.frozen_stages + 1):\n                m = getattr(self, self.layers[i])\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def train(self, mode=True) -> None:\n        super().train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n    def forward(self, x: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:\n        outs = []\n        for i, layer_name in enumerate(self.layers):\n            layer = 
getattr(self, layer_name)\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/darknet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\n\n\nclass ResBlock(BaseModule):\n    \"\"\"The basic residual block used in Darknet. Each ResBlock consists of two\n    ConvModules and the input is added to the final output. Each ConvModule is\n    composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer\n    has half of the number of the filters as much as the second convLayer. The\n    first convLayer has filter size of 1x1 and the second one has the filter\n    size of 3x3.\n\n    Args:\n        in_channels (int): The input channels. Must be even.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 init_cfg=None):\n        super(ResBlock, self).__init__(init_cfg)\n        assert in_channels % 2 == 0  # ensure the in_channels is even\n        half_in_channels = in_channels // 2\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)\n        self.conv2 = ConvModule(\n            half_in_channels, in_channels, 3, padding=1, **cfg)\n\n    def forward(self, x):\n        residual = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n        out = out + residual\n\n        return out\n\n\n@MODELS.register_module()\nclass Darknet(BaseModule):\n    \"\"\"Darknet backbone.\n\n    Args:\n        depth (int): Depth of Darknet. Currently only support 53.\n        out_indices (Sequence[int]): Output from which stages.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters. Default: -1.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        pretrained (str, optional): model pretrained path. 
Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import Darknet\n        >>> import torch\n        >>> self = Darknet(depth=53)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 416, 416)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        ...\n        (1, 256, 52, 52)\n        (1, 512, 26, 26)\n        (1, 1024, 13, 13)\n    \"\"\"\n\n    # Dict(depth: (layers, channels))\n    arch_settings = {\n        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),\n                               (512, 1024)))\n    }\n\n    def __init__(self,\n                 depth=53,\n                 out_indices=(3, 4, 5),\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 norm_eval=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(Darknet, self).__init__(init_cfg)\n        if depth not in self.arch_settings:\n            raise KeyError(f'invalid depth {depth} for darknet')\n\n        self.depth = depth\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.layers, self.channels = self.arch_settings[depth]\n\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)\n\n        self.cr_blocks = ['conv1']\n        for i, n_layers in enumerate(self.layers):\n            layer_name = f'conv_res_block{i + 1}'\n            in_c, out_c = self.channels[i]\n            self.add_module(\n                layer_name,\n                self.make_conv_res_block(in_c, out_c, n_layers, **cfg))\n            self.cr_blocks.append(layer_name)\n\n        self.norm_eval = norm_eval\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def forward(self, x):\n        outs = []\n        for i, layer_name in enumerate(self.cr_blocks):\n            cr_block = getattr(self, layer_name)\n            x = cr_block(x)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return tuple(outs)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for i in range(self.frozen_stages):\n                m = getattr(self, self.cr_blocks[i])\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def train(self, mode=True):\n        super(Darknet, self).train(mode)\n        self._freeze_stages()\n        if mode and 
self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n    @staticmethod\n    def make_conv_res_block(in_channels,\n                            out_channels,\n                            res_repeat,\n                            conv_cfg=None,\n                            norm_cfg=dict(type='BN', requires_grad=True),\n                            act_cfg=dict(type='LeakyReLU',\n                                         negative_slope=0.1)):\n        \"\"\"In Darknet backbone, ConvLayer is usually followed by ResBlock. This\n        function builds such a stage. The Conv layer always has 3x3 filters\n        with stride=2, and the number of filters in the Conv layer is the same\n        as the out channels of the ResBlock.\n\n        Args:\n            in_channels (int): The number of input channels.\n            out_channels (int): The number of output channels.\n            res_repeat (int): The number of ResBlocks.\n            conv_cfg (dict): Config dict for convolution layer. Default: None.\n            norm_cfg (dict): Dictionary to construct and config norm layer.\n                Default: dict(type='BN', requires_grad=True)\n            act_cfg (dict): Config dict for activation layer.\n                Default: dict(type='LeakyReLU', negative_slope=0.1).\n        \"\"\"\n\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        model = nn.Sequential()\n        model.add_module(\n            'conv',\n            ConvModule(\n                in_channels, out_channels, 3, stride=2, padding=1, **cfg))\n        for idx in range(res_repeat):\n            model.add_module('res{}'.format(idx),\n                             ResBlock(out_channels, **cfg))\n        return model\n"
  },
  {
    "path": "mmdet/models/backbones/detectors_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.logging import MMLogger\nfrom mmengine.model import Sequential, constant_init, kaiming_init\nfrom mmengine.runner.checkpoint import load_checkpoint\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom .resnet import BasicBlock\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    r\"\"\"Bottleneck for the ResNet backbone in `DetectoRS\n    <https://arxiv.org/pdf/2006.02334.pdf>`_.\n\n    This bottleneck allows the users to specify whether to use\n    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).\n\n    Args:\n         inplanes (int): The number of input channels.\n         planes (int): The number of output channels before expansion.\n         rfp_inplanes (int, optional): The number of channels from RFP.\n             Default: None. If specified, an additional conv layer will be\n             added for ``rfp_feat``. Otherwise, the structure is the same as\n             base class.\n         sac (dict, optional): Dictionary to construct SAC. Default: None.\n         init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 rfp_inplanes=None,\n                 sac=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(Bottleneck, self).__init__(\n            inplanes, planes, init_cfg=init_cfg, **kwargs)\n\n        assert sac is None or isinstance(sac, dict)\n        self.sac = sac\n        self.with_sac = sac is not None\n        if self.with_sac:\n            self.conv2 = build_conv_layer(\n                self.sac,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                bias=False)\n\n        self.rfp_inplanes = rfp_inplanes\n        if self.rfp_inplanes:\n            self.rfp_conv = build_conv_layer(\n                None,\n                self.rfp_inplanes,\n                planes * self.expansion,\n                1,\n                stride=1,\n                bias=True)\n            if init_cfg is None:\n                self.init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='rfp_conv'))\n\n    def rfp_forward(self, x, rfp_feat):\n        \"\"\"The forward function that also takes the RFP features as input.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = 
self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        if self.rfp_inplanes:\n            rfp_feat = self.rfp_conv(rfp_feat)\n            out = out + rfp_feat\n\n        out = self.relu(out)\n\n        return out\n\n\nclass ResLayer(Sequential):\n    \"\"\"ResLayer to build ResNet style backbone for RFP in DetectoRS.\n\n    The difference between this module and base class is that we pass\n    ``rfp_inplanes`` to the first block.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Default: 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck. Default: False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='BN')\n        downsample_first (bool): Downsample at the first block or last block.\n            False for Hourglass, True for ResNet. Default: True\n        rfp_inplanes (int, optional): The number of channels from RFP.\n            Default: None. If specified, an additional conv layer will be\n            added for ``rfp_feat``. Otherwise, the structure is the same as\n            base class.\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 inplanes,\n                 planes,\n                 num_blocks,\n                 stride=1,\n                 avg_down=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 downsample_first=True,\n                 rfp_inplanes=None,\n                 **kwargs):\n        self.block = block\n        assert downsample_first, f'downsample_first={downsample_first} is ' \\\n                                 'not supported in DetectoRS'\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = []\n            conv_stride = stride\n            if avg_down and stride != 1:\n                conv_stride = 1\n                downsample.append(\n                    nn.AvgPool2d(\n                        kernel_size=stride,\n                        stride=stride,\n                        ceil_mode=True,\n                        count_include_pad=False))\n            downsample.extend([\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=conv_stride,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1]\n            ])\n            downsample = nn.Sequential(*downsample)\n\n        layers = []\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride,\n                downsample=downsample,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                rfp_inplanes=rfp_inplanes,\n                **kwargs))\n        inplanes = planes * block.expansion\n        for _ in range(1, num_blocks):\n            layers.append(\n       
         block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n\n        super(ResLayer, self).__init__(*layers)\n\n\n@MODELS.register_module()\nclass DetectoRS_ResNet(ResNet):\n    \"\"\"ResNet backbone for DetectoRS.\n\n    Args:\n        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous\n            Convolution). Default: None.\n        stage_with_sac (list): Which stage to use sac. Default: (False, False,\n            False, False).\n        rfp_inplanes (int, optional): The number of channels from RFP.\n            Default: None. If specified, an additional conv layer will be\n            added for ``rfp_feat``. Otherwise, the structure is the same as\n            base class.\n        output_img (bool): If ``True``, the input image will be inserted into\n            the starting position of output. Default: False.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 sac=None,\n                 stage_with_sac=(False, False, False, False),\n                 rfp_inplanes=None,\n                 output_img=False,\n                 pretrained=None,\n                 init_cfg=None,\n                 **kwargs):\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        self.pretrained = pretrained\n        if init_cfg is not None:\n            assert isinstance(init_cfg, dict), \\\n                f'init_cfg must be a dict, but got {type(init_cfg)}'\n            if 'type' in init_cfg:\n                assert init_cfg.get('type') == 'Pretrained', \\\n                    'Only can initialize module by loading a pretrained model'\n            else:\n                raise KeyError('`init_cfg` must contain the key \"type\"')\n            self.pretrained = init_cfg.get('checkpoint')\n        self.sac = sac\n        self.stage_with_sac = stage_with_sac\n        self.rfp_inplanes = rfp_inplanes\n        self.output_img = output_img\n        super(DetectoRS_ResNet, self).__init__(**kwargs)\n\n        self.inplanes = self.stem_channels\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = self.strides[i]\n            dilation = self.dilations[i]\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            sac = self.sac if self.stage_with_sac[i] else None\n            if self.plugins is not None:\n                stage_plugins = self.make_stage_plugins(self.plugins, i)\n            else:\n                stage_plugins = None\n            planes = self.base_channels * 2**i\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=planes,\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=self.with_cp,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                dcn=dcn,\n                sac=sac,\n                rfp_inplanes=rfp_inplanes if i > 0 else None,\n                plugins=stage_plugins)\n            self.inplanes = 
planes * self.block.expansion\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n    # In order to be properly initialized by RFP\n    def init_weights(self):\n        # Calling this method will cause parameter initialization exception\n        # super(DetectoRS_ResNet, self).init_weights()\n\n        if isinstance(self.pretrained, str):\n            logger = MMLogger.get_current_instance()\n            load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n        elif self.pretrained is None:\n            for m in self.modules():\n                if isinstance(m, nn.Conv2d):\n                    kaiming_init(m)\n                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n                    constant_init(m, 1)\n\n            if self.dcn is not None:\n                for m in self.modules():\n                    if isinstance(m, Bottleneck) and hasattr(\n                            m.conv2, 'conv_offset'):\n                        constant_init(m.conv2.conv_offset, 0)\n\n            if self.zero_init_residual:\n                for m in self.modules():\n                    if isinstance(m, Bottleneck):\n                        constant_init(m.norm3, 0)\n                    elif isinstance(m, BasicBlock):\n                        constant_init(m.norm2, 0)\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.\"\"\"\n        return ResLayer(**kwargs)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = list(super(DetectoRS_ResNet, self).forward(x))\n        if self.output_img:\n            outs.insert(0, x)\n        return tuple(outs)\n\n    def rfp_forward(self, x, rfp_feats):\n        \"\"\"Forward function for RFP.\"\"\"\n        if self.deep_stem:\n            x = self.stem(x)\n        else:\n            x = self.conv1(x)\n            x = self.norm1(x)\n            x = self.relu(x)\n        x = self.maxpool(x)\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            rfp_feat = rfp_feats[i] if i > 0 else None\n            for layer in res_layer:\n                x = layer.rfp_forward(x, rfp_feat)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/detectors_resnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom mmdet.registry import MODELS\nfrom .detectors_resnet import Bottleneck as _Bottleneck\nfrom .detectors_resnet import DetectoRS_ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeXt.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(\n            self.norm_cfg, width, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        self.with_modulated_dcn = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if self.with_sac:\n            self.conv2 = build_conv_layer(\n                self.sac,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        elif not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                self.conv_cfg,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                self.dcn,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n\n@MODELS.register_module()\nclass DetectoRS_ResNeXt(DetectoRS_ResNet):\n    \"\"\"ResNeXt backbone for DetectoRS.\n\n    Args:\n        groups (int): The number of groups in ResNeXt.\n        base_width (int): The base width of ResNeXt.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 
4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self, groups=1, base_width=4, **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        super(DetectoRS_ResNeXt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        return super().make_res_layer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/efficientnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn.bricks import ConvModule, DropPath\nfrom mmengine.model import BaseModule, Sequential\n\nfrom mmdet.registry import MODELS\nfrom ..layers import InvertedResidual, SELayer\nfrom ..utils import make_divisible\n\n\nclass EdgeResidual(BaseModule):\n    \"\"\"Edge Residual Block.\n\n    Args:\n        in_channels (int): The input channels of this module.\n        out_channels (int): The output channels of this module.\n        mid_channels (int): The input channels of the second convolution.\n        kernel_size (int): The kernel size of the first convolution.\n            Defaults to 3.\n        stride (int): The stride of the first convolution. Defaults to 1.\n        se_cfg (dict, optional): Config dict for se layer. Defaults to None,\n            which means no se layer.\n        with_residual (bool): Use residual connection. Defaults to True.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to ``dict(type='BN')``.\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to ``dict(type='ReLU')``.\n        drop_path_rate (float): stochastic depth rate. Defaults to 0.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Defaults to False.\n        init_cfg (dict | list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 mid_channels,\n                 kernel_size=3,\n                 stride=1,\n                 se_cfg=None,\n                 with_residual=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU'),\n                 drop_path_rate=0.,\n                 with_cp=False,\n                 init_cfg=None,\n                 **kwargs):\n        super(EdgeResidual, self).__init__(init_cfg=init_cfg)\n        assert stride in [1, 2]\n        self.with_cp = with_cp\n        self.drop_path = DropPath(\n            drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n        self.with_se = se_cfg is not None\n        self.with_residual = (\n            stride == 1 and in_channels == out_channels and with_residual)\n\n        if self.with_se:\n            assert isinstance(se_cfg, dict)\n\n        self.conv1 = ConvModule(\n            in_channels=in_channels,\n            out_channels=mid_channels,\n            kernel_size=kernel_size,\n            stride=1,\n            padding=kernel_size // 2,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        if self.with_se:\n            self.se = SELayer(**se_cfg)\n\n        self.conv2 = ConvModule(\n            in_channels=mid_channels,\n            out_channels=out_channels,\n            kernel_size=1,\n            stride=stride,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            out = x\n            out = self.conv1(out)\n\n            if self.with_se:\n                out = self.se(out)\n\n          
  out = self.conv2(out)\n\n            if self.with_residual:\n                return x + self.drop_path(out)\n            else:\n                return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        return out\n\n\ndef model_scaling(layer_setting, arch_setting):\n    \"\"\"Scaling operation to the layer's parameters according to the\n    arch_setting.\"\"\"\n    # scale width\n    new_layer_setting = copy.deepcopy(layer_setting)\n    for layer_cfg in new_layer_setting:\n        for block_cfg in layer_cfg:\n            block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)\n\n    # scale depth\n    split_layer_setting = [new_layer_setting[0]]\n    for layer_cfg in new_layer_setting[1:-1]:\n        tmp_index = [0]\n        for i in range(len(layer_cfg) - 1):\n            if layer_cfg[i + 1][1] != layer_cfg[i][1]:\n                tmp_index.append(i + 1)\n        tmp_index.append(len(layer_cfg))\n        for i in range(len(tmp_index) - 1):\n            split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i +\n                                                                        1]])\n    split_layer_setting.append(new_layer_setting[-1])\n\n    num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]\n    new_layers = [\n        int(math.ceil(arch_setting[1] * num)) for num in num_of_layers\n    ]\n\n    merge_layer_setting = [split_layer_setting[0]]\n    for i, layer_cfg in enumerate(split_layer_setting[1:-1]):\n        if new_layers[i] <= num_of_layers[i]:\n            tmp_layer_cfg = layer_cfg[:new_layers[i]]\n        else:\n            tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (\n                new_layers[i] - num_of_layers[i])\n        if tmp_layer_cfg[0][3] == 1 and i != 0:\n            merge_layer_setting[-1] += tmp_layer_cfg.copy()\n        else:\n            merge_layer_setting.append(tmp_layer_cfg.copy())\n    merge_layer_setting.append(split_layer_setting[-1])\n\n    return merge_layer_setting\n\n\n@MODELS.register_module()\nclass EfficientNet(BaseModule):\n    \"\"\"EfficientNet backbone.\n\n    Args:\n        arch (str): Architecture of efficientnet. Defaults to b0.\n        out_indices (Sequence[int]): Output from which stages.\n            Defaults to (6, ).\n        frozen_stages (int): Stages to be frozen (all param fixed).\n            Defaults to 0, which means not freezing any parameters.\n        conv_cfg (dict): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to dict(type='Swish').\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Defaults to False.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. 
Defaults to False.\n    \"\"\"\n\n    # Parameters to build layers.\n    # 'b' represents the architecture of normal EfficientNet family includes\n    # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'.\n    # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es',\n    # 'em', 'el'.\n    # 6 parameters are needed to construct a layer, From left to right:\n    # - kernel_size: The kernel size of the block\n    # - out_channel: The number of out_channels of the block\n    # - se_ratio: The sequeeze ratio of SELayer.\n    # - stride: The stride of the block\n    # - expand_ratio: The expand_ratio of the mid_channels\n    # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual\n    layer_settings = {\n        'b': [[[3, 32, 0, 2, 0, -1]],\n              [[3, 16, 4, 1, 1, 0]],\n              [[3, 24, 4, 2, 6, 0],\n               [3, 24, 4, 1, 6, 0]],\n              [[5, 40, 4, 2, 6, 0],\n               [5, 40, 4, 1, 6, 0]],\n              [[3, 80, 4, 2, 6, 0],\n               [3, 80, 4, 1, 6, 0],\n               [3, 80, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0]],\n              [[5, 192, 4, 2, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [3, 320, 4, 1, 6, 0]],\n              [[1, 1280, 0, 1, 0, -1]]\n              ],\n        'e': [[[3, 32, 0, 2, 0, -1]],\n              [[3, 24, 0, 1, 3, 1]],\n              [[3, 32, 0, 2, 8, 1],\n               [3, 32, 0, 1, 8, 1]],\n              [[3, 48, 0, 2, 8, 1],\n               [3, 48, 0, 1, 8, 1],\n               [3, 48, 0, 1, 8, 1],\n               [3, 48, 0, 1, 8, 1]],\n              [[5, 96, 0, 2, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0]],\n              [[5, 192, 0, 2, 8, 0],\n               [5, 192, 0, 1, 8, 0]],\n              [[1, 1280, 0, 1, 0, -1]]\n              ]\n    }  # yapf: disable\n\n    # Parameters to build different kinds of architecture.\n    # From left to right: scaling factor for width, scaling factor for depth,\n    # resolution.\n    arch_settings = {\n        'b0': (1.0, 1.0, 224),\n        'b1': (1.0, 1.1, 240),\n        'b2': (1.1, 1.2, 260),\n        'b3': (1.2, 1.4, 300),\n        'b4': (1.4, 1.8, 380),\n        'b5': (1.6, 2.2, 456),\n        'b6': (1.8, 2.6, 528),\n        'b7': (2.0, 3.1, 600),\n        'b8': (2.2, 3.6, 672),\n        'es': (1.0, 1.0, 224),\n        'em': (1.0, 1.1, 240),\n        'el': (1.2, 1.4, 300)\n    }\n\n    def __init__(self,\n                 arch='b0',\n                 drop_path_rate=0.,\n                 out_indices=(6, ),\n                 frozen_stages=0,\n                 conv_cfg=dict(type='Conv2dAdaptivePadding'),\n                 norm_cfg=dict(type='BN', eps=1e-3),\n                 act_cfg=dict(type='Swish'),\n                 norm_eval=False,\n                 with_cp=False,\n                 init_cfg=[\n                     dict(type='Kaiming', layer='Conv2d'),\n                     dict(\n                         type='Constant',\n                         layer=['_BatchNorm', 'GroupNorm'],\n                         val=1)\n                 ]):\n        super(EfficientNet, self).__init__(init_cfg)\n        assert 
arch in self.arch_settings, \\\n            f'\"{arch}\" is not one of the arch_settings ' \\\n            f'({\", \".join(self.arch_settings.keys())})'\n        self.arch_setting = self.arch_settings[arch]\n        self.layer_setting = self.layer_settings[arch[:1]]\n        for index in out_indices:\n            if index not in range(0, len(self.layer_setting)):\n                raise ValueError('the item in out_indices must in '\n                                 f'range(0, {len(self.layer_setting)}). '\n                                 f'But received {index}')\n\n        if frozen_stages not in range(len(self.layer_setting) + 1):\n            raise ValueError('frozen_stages must be in range(0, '\n                             f'{len(self.layer_setting) + 1}). '\n                             f'But received {frozen_stages}')\n        self.drop_path_rate = drop_path_rate\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n\n        self.layer_setting = model_scaling(self.layer_setting,\n                                           self.arch_setting)\n        block_cfg_0 = self.layer_setting[0][0]\n        block_cfg_last = self.layer_setting[-1][0]\n        self.in_channels = make_divisible(block_cfg_0[1], 8)\n        self.out_channels = block_cfg_last[1]\n        self.layers = nn.ModuleList()\n        self.layers.append(\n            ConvModule(\n                in_channels=3,\n                out_channels=self.in_channels,\n                kernel_size=block_cfg_0[0],\n                stride=block_cfg_0[3],\n                padding=block_cfg_0[0] // 2,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                act_cfg=self.act_cfg))\n        self.make_layer()\n        # Avoid building unused layers in mmdetection.\n        if len(self.layers) < max(self.out_indices) + 1:\n            self.layers.append(\n                ConvModule(\n                    in_channels=self.in_channels,\n                    out_channels=self.out_channels,\n                    kernel_size=block_cfg_last[0],\n                    stride=block_cfg_last[3],\n                    padding=block_cfg_last[0] // 2,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg))\n\n    def make_layer(self):\n        # Without the first and the final conv block.\n        layer_setting = self.layer_setting[1:-1]\n\n        total_num_blocks = sum([len(x) for x in layer_setting])\n        block_idx = 0\n        dpr = [\n            x.item()\n            for x in torch.linspace(0, self.drop_path_rate, total_num_blocks)\n        ]  # stochastic depth decay rule\n\n        for i, layer_cfg in enumerate(layer_setting):\n            # Avoid building unused layers in mmdetection.\n            if i > max(self.out_indices) - 1:\n                break\n            layer = []\n            for i, block_cfg in enumerate(layer_cfg):\n                (kernel_size, out_channels, se_ratio, stride, expand_ratio,\n                 block_type) = block_cfg\n\n                mid_channels = int(self.in_channels * expand_ratio)\n                out_channels = make_divisible(out_channels, 8)\n                if se_ratio <= 0:\n                    se_cfg = None\n                else:\n                    # In mmdetection, the `divisor` is deleted to 
align\n                    # the logic of SELayer with mmcls.\n                    se_cfg = dict(\n                        channels=mid_channels,\n                        ratio=expand_ratio * se_ratio,\n                        act_cfg=(self.act_cfg, dict(type='Sigmoid')))\n                if block_type == 1:  # edge tpu\n                    if i > 0 and expand_ratio == 3:\n                        with_residual = False\n                        expand_ratio = 4\n                    else:\n                        with_residual = True\n                    mid_channels = int(self.in_channels * expand_ratio)\n                    if se_cfg is not None:\n                        # In mmdetection, the `divisor` is deleted to align\n                        # the logic of SELayer with mmcls.\n                        se_cfg = dict(\n                            channels=mid_channels,\n                            ratio=se_ratio * expand_ratio,\n                            act_cfg=(self.act_cfg, dict(type='Sigmoid')))\n                    block = partial(EdgeResidual, with_residual=with_residual)\n                else:\n                    block = InvertedResidual\n                layer.append(\n                    block(\n                        in_channels=self.in_channels,\n                        out_channels=out_channels,\n                        mid_channels=mid_channels,\n                        kernel_size=kernel_size,\n                        stride=stride,\n                        se_cfg=se_cfg,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg,\n                        drop_path_rate=dpr[block_idx],\n                        with_cp=self.with_cp,\n                        # In mmdetection, `with_expand_conv` is set to align\n                        # the logic of InvertedResidual with mmcls.\n                        with_expand_conv=(mid_channels != self.in_channels)))\n                self.in_channels = out_channels\n                block_idx += 1\n            self.layers.append(Sequential(*layer))\n\n    def forward(self, x):\n        outs = []\n        for i, layer in enumerate(self.layers):\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return tuple(outs)\n\n    def _freeze_stages(self):\n        for i in range(self.frozen_stages):\n            m = self.layers[i]\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def train(self, mode=True):\n        super(EfficientNet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eval()\n"
  },
  {
    "path": "mmdet/models/backbones/hourglass.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Sequence\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptMultiConfig\nfrom ..layers import ResLayer\nfrom .resnet import BasicBlock\n\n\nclass HourglassModule(BaseModule):\n    \"\"\"Hourglass Module for HourglassNet backbone.\n\n    Generate module recursively and use BasicBlock as the base unit.\n\n    Args:\n        depth (int): Depth of current HourglassModule.\n        stage_channels (list[int]): Feature channels of sub-modules in current\n            and follow-up HourglassModule.\n        stage_blocks (list[int]): Number of sub-modules stacked in current and\n            follow-up HourglassModule.\n        norm_cfg (ConfigType): Dictionary to construct and config norm layer.\n            Defaults to `dict(type='BN', requires_grad=True)`\n        upsample_cfg (ConfigType): Config dict for interpolate layer.\n            Defaults to `dict(mode='nearest')`\n       init_cfg (dict or ConfigDict, optional): the config to control the\n           initialization.\n    \"\"\"\n\n    def __init__(self,\n                 depth: int,\n                 stage_channels: List[int],\n                 stage_blocks: List[int],\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 upsample_cfg: ConfigType = dict(mode='nearest'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg)\n\n        self.depth = depth\n\n        cur_block = stage_blocks[0]\n        next_block = stage_blocks[1]\n\n        cur_channel = stage_channels[0]\n        next_channel = stage_channels[1]\n\n        self.up1 = ResLayer(\n            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)\n\n        self.low1 = ResLayer(\n            BasicBlock,\n            cur_channel,\n            next_channel,\n            cur_block,\n            stride=2,\n            norm_cfg=norm_cfg)\n\n        if self.depth > 1:\n            self.low2 = HourglassModule(depth - 1, stage_channels[1:],\n                                        stage_blocks[1:])\n        else:\n            self.low2 = ResLayer(\n                BasicBlock,\n                next_channel,\n                next_channel,\n                next_block,\n                norm_cfg=norm_cfg)\n\n        self.low3 = ResLayer(\n            BasicBlock,\n            next_channel,\n            cur_channel,\n            cur_block,\n            norm_cfg=norm_cfg,\n            downsample_first=False)\n\n        self.up2 = F.interpolate\n        self.upsample_cfg = upsample_cfg\n\n    def forward(self, x: torch.Tensor) -> nn.Module:\n        \"\"\"Forward function.\"\"\"\n        up1 = self.up1(x)\n        low1 = self.low1(x)\n        low2 = self.low2(low1)\n        low3 = self.low3(low2)\n        # Fixing `scale factor` (e.g. 
2) is common for upsampling, but\n        # in some cases the spatial size is mismatched and error will arise.\n        if 'scale_factor' in self.upsample_cfg:\n            up2 = self.up2(low3, **self.upsample_cfg)\n        else:\n            shape = up1.shape[2:]\n            up2 = self.up2(low3, size=shape, **self.upsample_cfg)\n        return up1 + up2\n\n\n@MODELS.register_module()\nclass HourglassNet(BaseModule):\n    \"\"\"HourglassNet backbone.\n\n    Stacked Hourglass Networks for Human Pose Estimation.\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1603.06937>`_ .\n\n    Args:\n        downsample_times (int): Downsample times in a HourglassModule.\n        num_stacks (int): Number of HourglassModule modules stacked,\n            1 for Hourglass-52, 2 for Hourglass-104.\n        stage_channels (Sequence[int]): Feature channel of each sub-module in a\n            HourglassModule.\n        stage_blocks (Sequence[int]): Number of sub-modules stacked in a\n            HourglassModule.\n        feat_channel (int): Feature channel of conv after a HourglassModule.\n        norm_cfg (norm_cfg): Dictionary to construct and config norm layer.\n       init_cfg (dict or ConfigDict, optional): the config to control the\n           initialization.\n\n    Example:\n        >>> from mmdet.models import HourglassNet\n        >>> import torch\n        >>> self = HourglassNet()\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 511, 511)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_output in level_outputs:\n        ...     print(tuple(level_output.shape))\n        (1, 256, 128, 128)\n        (1, 256, 128, 128)\n    \"\"\"\n\n    def __init__(self,\n                 downsample_times: int = 5,\n                 num_stacks: int = 2,\n                 stage_channels: Sequence = (256, 256, 384, 384, 384, 512),\n                 stage_blocks: Sequence = (2, 2, 2, 2, 2, 4),\n                 feat_channel: int = 256,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 init_cfg: OptMultiConfig = None) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg)\n\n        self.num_stacks = num_stacks\n        assert self.num_stacks >= 1\n        assert len(stage_channels) == len(stage_blocks)\n        assert len(stage_channels) > downsample_times\n\n        cur_channel = stage_channels[0]\n\n        self.stem = nn.Sequential(\n            ConvModule(\n                3, cur_channel // 2, 7, padding=3, stride=2,\n                norm_cfg=norm_cfg),\n            ResLayer(\n                BasicBlock,\n                cur_channel // 2,\n                cur_channel,\n                1,\n                stride=2,\n                norm_cfg=norm_cfg))\n\n        self.hourglass_modules = nn.ModuleList([\n            HourglassModule(downsample_times, stage_channels, stage_blocks)\n            for _ in range(num_stacks)\n        ])\n\n        self.inters = ResLayer(\n            BasicBlock,\n            cur_channel,\n            cur_channel,\n            num_stacks - 1,\n            norm_cfg=norm_cfg)\n\n        self.conv1x1s = nn.ModuleList([\n            ConvModule(\n                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)\n            for _ in range(num_stacks - 1)\n        ])\n\n        self.out_convs = nn.ModuleList([\n            ConvModule(\n     
           cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)\n            for _ in range(num_stacks)\n        ])\n\n        self.remap_convs = nn.ModuleList([\n            ConvModule(\n                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)\n            for _ in range(num_stacks - 1)\n        ])\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def init_weights(self) -> None:\n        \"\"\"Init module weights.\"\"\"\n        # Training Centripetal Model needs to reset parameters for Conv2d\n        super().init_weights()\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                m.reset_parameters()\n\n    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n        \"\"\"Forward function.\"\"\"\n        inter_feat = self.stem(x)\n        out_feats = []\n\n        for ind in range(self.num_stacks):\n            single_hourglass = self.hourglass_modules[ind]\n            out_conv = self.out_convs[ind]\n\n            hourglass_feat = single_hourglass(inter_feat)\n            out_feat = out_conv(hourglass_feat)\n            out_feats.append(out_feat)\n\n            if ind < self.num_stacks - 1:\n                inter_feat = self.conv1x1s[ind](\n                    inter_feat) + self.remap_convs[ind](\n                        out_feat)\n                inter_feat = self.inters[ind](self.relu(inter_feat))\n\n        return out_feats\n"
  },
  {
    "path": "mmdet/models/backbones/hrnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.model import BaseModule, ModuleList, Sequential\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom .resnet import BasicBlock, Bottleneck\n\n\nclass HRModule(BaseModule):\n    \"\"\"High-Resolution Module for HRNet.\n\n    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange\n    is in this module.\n    \"\"\"\n\n    def __init__(self,\n                 num_branches,\n                 blocks,\n                 num_blocks,\n                 in_channels,\n                 num_channels,\n                 multiscale_output=True,\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 block_init_cfg=None,\n                 init_cfg=None):\n        super(HRModule, self).__init__(init_cfg)\n        self.block_init_cfg = block_init_cfg\n        self._check_branches(num_branches, num_blocks, in_channels,\n                             num_channels)\n\n        self.in_channels = in_channels\n        self.num_branches = num_branches\n\n        self.multiscale_output = multiscale_output\n        self.norm_cfg = norm_cfg\n        self.conv_cfg = conv_cfg\n        self.with_cp = with_cp\n        self.branches = self._make_branches(num_branches, blocks, num_blocks,\n                                            num_channels)\n        self.fuse_layers = self._make_fuse_layers()\n        self.relu = nn.ReLU(inplace=False)\n\n    def _check_branches(self, num_branches, num_blocks, in_channels,\n                        num_channels):\n        if num_branches != len(num_blocks):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_BLOCKS({len(num_blocks)})'\n            raise ValueError(error_msg)\n\n        if num_branches != len(num_channels):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_CHANNELS({len(num_channels)})'\n            raise ValueError(error_msg)\n\n        if num_branches != len(in_channels):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_INCHANNELS({len(in_channels)})'\n            raise ValueError(error_msg)\n\n    def _make_one_branch(self,\n                         branch_index,\n                         block,\n                         num_blocks,\n                         num_channels,\n                         stride=1):\n        downsample = None\n        if stride != 1 or \\\n                self.in_channels[branch_index] != \\\n                num_channels[branch_index] * block.expansion:\n            downsample = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    self.in_channels[branch_index],\n                    num_channels[branch_index] * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, num_channels[branch_index] *\n                                 block.expansion)[1])\n\n        layers = []\n        layers.append(\n            block(\n                self.in_channels[branch_index],\n                num_channels[branch_index],\n                stride,\n                downsample=downsample,\n                with_cp=self.with_cp,\n                
norm_cfg=self.norm_cfg,\n                conv_cfg=self.conv_cfg,\n                init_cfg=self.block_init_cfg))\n        self.in_channels[branch_index] = \\\n            num_channels[branch_index] * block.expansion\n        for i in range(1, num_blocks[branch_index]):\n            layers.append(\n                block(\n                    self.in_channels[branch_index],\n                    num_channels[branch_index],\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n                    init_cfg=self.block_init_cfg))\n\n        return Sequential(*layers)\n\n    def _make_branches(self, num_branches, block, num_blocks, num_channels):\n        branches = []\n\n        for i in range(num_branches):\n            branches.append(\n                self._make_one_branch(i, block, num_blocks, num_channels))\n\n        return ModuleList(branches)\n\n    def _make_fuse_layers(self):\n        if self.num_branches == 1:\n            return None\n\n        num_branches = self.num_branches\n        in_channels = self.in_channels\n        fuse_layers = []\n        num_out_branches = num_branches if self.multiscale_output else 1\n        for i in range(num_out_branches):\n            fuse_layer = []\n            for j in range(num_branches):\n                if j > i:\n                    fuse_layer.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                in_channels[j],\n                                in_channels[i],\n                                kernel_size=1,\n                                stride=1,\n                                padding=0,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg, in_channels[i])[1],\n                            nn.Upsample(\n                                scale_factor=2**(j - i), mode='nearest')))\n                elif j == i:\n                    fuse_layer.append(None)\n                else:\n                    conv_downsamples = []\n                    for k in range(i - j):\n                        if k == i - j - 1:\n                            conv_downsamples.append(\n                                nn.Sequential(\n                                    build_conv_layer(\n                                        self.conv_cfg,\n                                        in_channels[j],\n                                        in_channels[i],\n                                        kernel_size=3,\n                                        stride=2,\n                                        padding=1,\n                                        bias=False),\n                                    build_norm_layer(self.norm_cfg,\n                                                     in_channels[i])[1]))\n                        else:\n                            conv_downsamples.append(\n                                nn.Sequential(\n                                    build_conv_layer(\n                                        self.conv_cfg,\n                                        in_channels[j],\n                                        in_channels[j],\n                                        kernel_size=3,\n                                        stride=2,\n                                        padding=1,\n                                        bias=False),\n                                    
build_norm_layer(self.norm_cfg,\n                                                     in_channels[j])[1],\n                                    nn.ReLU(inplace=False)))\n                    fuse_layer.append(nn.Sequential(*conv_downsamples))\n            fuse_layers.append(nn.ModuleList(fuse_layer))\n\n        return nn.ModuleList(fuse_layers)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        if self.num_branches == 1:\n            return [self.branches[0](x[0])]\n\n        for i in range(self.num_branches):\n            x[i] = self.branches[i](x[i])\n\n        x_fuse = []\n        for i in range(len(self.fuse_layers)):\n            y = 0\n            for j in range(self.num_branches):\n                if i == j:\n                    y += x[j]\n                else:\n                    y += self.fuse_layers[i][j](x[j])\n            x_fuse.append(self.relu(y))\n        return x_fuse\n\n\n@MODELS.register_module()\nclass HRNet(BaseModule):\n    \"\"\"HRNet backbone.\n\n    `High-Resolution Representations for Labeling Pixels and Regions\n    arXiv: <https://arxiv.org/abs/1904.04514>`_.\n\n    Args:\n        extra (dict): Detailed configuration for each stage of HRNet.\n            There must be 4 stages, the configuration for each stage must have\n            5 keys:\n\n                - num_modules(int): The number of HRModule in this stage.\n                - num_branches(int): The number of branches in the HRModule.\n                - block(str): The type of convolution block.\n                - num_blocks(tuple): The number of blocks in each branch.\n                    The length must be equal to num_branches.\n                - num_channels(tuple): The number of channels in each branch.\n                    The length must be equal to num_branches.\n        in_channels (int): Number of input image channels. Default: 3.\n        conv_cfg (dict): Dictionary to construct and config conv layer.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Default: True.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity. Default: False.\n        multiscale_output (bool): Whether to output multi-level features\n            produced by multiple branches. If False, only the first level\n            feature will be output. Default: True.\n        pretrained (str, optional): Model pretrained path. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n\n    Example:\n        >>> from mmdet.models import HRNet\n        >>> import torch\n        >>> extra = dict(\n        >>>     stage1=dict(\n        >>>         num_modules=1,\n        >>>         num_branches=1,\n        >>>         block='BOTTLENECK',\n        >>>         num_blocks=(4, ),\n        >>>         num_channels=(64, )),\n        >>>     stage2=dict(\n        >>>         num_modules=1,\n        >>>         num_branches=2,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4),\n        >>>         num_channels=(32, 64)),\n        >>>     stage3=dict(\n        >>>         num_modules=4,\n        >>>         num_branches=3,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4, 4),\n        >>>         num_channels=(32, 64, 128)),\n        >>>     stage4=dict(\n        >>>         num_modules=3,\n        >>>         num_branches=4,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4, 4, 4),\n        >>>         num_channels=(32, 64, 128, 256)))\n        >>> self = HRNet(extra, in_channels=1)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 1, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 32, 8, 8)\n        (1, 64, 4, 4)\n        (1, 128, 2, 2)\n        (1, 256, 1, 1)\n    \"\"\"\n\n    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}\n\n    def __init__(self,\n                 extra,\n                 in_channels=3,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 norm_eval=True,\n                 with_cp=False,\n                 zero_init_residual=False,\n                 multiscale_output=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(HRNet, self).__init__(init_cfg)\n\n        self.pretrained = pretrained\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        # Assert configurations of 4 stages are in extra\n        assert 'stage1' in extra and 'stage2' in extra \\\n               and 'stage3' in extra and 'stage4' in extra\n        # Assert whether the length of `num_blocks` and `num_channels` are\n        # equal to `num_branches`\n        for i in range(4):\n            cfg = extra[f'stage{i + 1}']\n            assert len(cfg['num_blocks']) == cfg['num_branches'] and \\\n                   len(cfg['num_channels']) == cfg['num_branches']\n\n        self.extra = extra\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n        
self.zero_init_residual = zero_init_residual\n\n        # stem net\n        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            in_channels,\n            64,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n\n        self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            self.conv_cfg,\n            64,\n            64,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.relu = nn.ReLU(inplace=True)\n\n        # stage 1\n        self.stage1_cfg = self.extra['stage1']\n        num_channels = self.stage1_cfg['num_channels'][0]\n        block_type = self.stage1_cfg['block']\n        num_blocks = self.stage1_cfg['num_blocks'][0]\n\n        block = self.blocks_dict[block_type]\n        stage1_out_channels = num_channels * block.expansion\n        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n\n        # stage 2\n        self.stage2_cfg = self.extra['stage2']\n        num_channels = self.stage2_cfg['num_channels']\n        block_type = self.stage2_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition1 = self._make_transition_layer([stage1_out_channels],\n                                                       num_channels)\n        self.stage2, pre_stage_channels = self._make_stage(\n            self.stage2_cfg, num_channels)\n\n        # stage 3\n        self.stage3_cfg = self.extra['stage3']\n        num_channels = self.stage3_cfg['num_channels']\n        block_type = self.stage3_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition2 = self._make_transition_layer(pre_stage_channels,\n                                                       num_channels)\n        self.stage3, pre_stage_channels = self._make_stage(\n            self.stage3_cfg, num_channels)\n\n        # stage 4\n        self.stage4_cfg = self.extra['stage4']\n        num_channels = self.stage4_cfg['num_channels']\n        block_type = self.stage4_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition3 = self._make_transition_layer(pre_stage_channels,\n                                                       num_channels)\n        self.stage4, pre_stage_channels = self._make_stage(\n            self.stage4_cfg, num_channels, multiscale_output=multiscale_output)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: the normalization layer named \"norm2\" \"\"\"\n        return getattr(self, self.norm2_name)\n\n    def _make_transition_layer(self, num_channels_pre_layer,\n                               num_channels_cur_layer):\n        num_branches_cur = len(num_channels_cur_layer)\n        num_branches_pre = len(num_channels_pre_layer)\n\n        transition_layers = []\n        for i in range(num_branches_cur):\n            if i < 
num_branches_pre:\n                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n                    transition_layers.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                num_channels_pre_layer[i],\n                                num_channels_cur_layer[i],\n                                kernel_size=3,\n                                stride=1,\n                                padding=1,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg,\n                                             num_channels_cur_layer[i])[1],\n                            nn.ReLU(inplace=True)))\n                else:\n                    transition_layers.append(None)\n            else:\n                conv_downsamples = []\n                for j in range(i + 1 - num_branches_pre):\n                    in_channels = num_channels_pre_layer[-1]\n                    out_channels = num_channels_cur_layer[i] \\\n                        if j == i - num_branches_pre else in_channels\n                    conv_downsamples.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                in_channels,\n                                out_channels,\n                                kernel_size=3,\n                                stride=2,\n                                padding=1,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg, out_channels)[1],\n                            nn.ReLU(inplace=True)))\n                transition_layers.append(nn.Sequential(*conv_downsamples))\n\n        return nn.ModuleList(transition_layers)\n\n    def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])\n\n        layers = []\n        block_init_cfg = None\n        if self.pretrained is None and not hasattr(\n                self, 'init_cfg') and self.zero_init_residual:\n            if block is BasicBlock:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm2'))\n            elif block is Bottleneck:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm3'))\n        layers.append(\n            block(\n                inplanes,\n                planes,\n                stride,\n                downsample=downsample,\n                with_cp=self.with_cp,\n                norm_cfg=self.norm_cfg,\n                conv_cfg=self.conv_cfg,\n                init_cfg=block_init_cfg,\n            ))\n        inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(\n                block(\n                    inplanes,\n                    planes,\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n 
                   init_cfg=block_init_cfg))\n\n        return Sequential(*layers)\n\n    def _make_stage(self, layer_config, in_channels, multiscale_output=True):\n        num_modules = layer_config['num_modules']\n        num_branches = layer_config['num_branches']\n        num_blocks = layer_config['num_blocks']\n        num_channels = layer_config['num_channels']\n        block = self.blocks_dict[layer_config['block']]\n\n        hr_modules = []\n        block_init_cfg = None\n        if self.pretrained is None and not hasattr(\n                self, 'init_cfg') and self.zero_init_residual:\n            if block is BasicBlock:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm2'))\n            elif block is Bottleneck:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm3'))\n\n        for i in range(num_modules):\n            # multi_scale_output is only used for the last module\n            if not multiscale_output and i == num_modules - 1:\n                reset_multiscale_output = False\n            else:\n                reset_multiscale_output = True\n\n            hr_modules.append(\n                HRModule(\n                    num_branches,\n                    block,\n                    num_blocks,\n                    in_channels,\n                    num_channels,\n                    reset_multiscale_output,\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n                    block_init_cfg=block_init_cfg))\n\n        return Sequential(*hr_modules), in_channels\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        x = self.norm1(x)\n        x = self.relu(x)\n        x = self.conv2(x)\n        x = self.norm2(x)\n        x = self.relu(x)\n        x = self.layer1(x)\n\n        x_list = []\n        for i in range(self.stage2_cfg['num_branches']):\n            if self.transition1[i] is not None:\n                x_list.append(self.transition1[i](x))\n            else:\n                x_list.append(x)\n        y_list = self.stage2(x_list)\n\n        x_list = []\n        for i in range(self.stage3_cfg['num_branches']):\n            if self.transition2[i] is not None:\n                x_list.append(self.transition2[i](y_list[-1]))\n            else:\n                x_list.append(y_list[i])\n        y_list = self.stage3(x_list)\n\n        x_list = []\n        for i in range(self.stage4_cfg['num_branches']):\n            if self.transition3[i] is not None:\n                x_list.append(self.transition3[i](y_list[-1]))\n            else:\n                x_list.append(y_list[i])\n        y_list = self.stage4(x_list)\n\n        return y_list\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode will keeping the normalization\n        layer freezed.\"\"\"\n        super(HRNet, self).train(mode)\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval have effect on BatchNorm only\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n"
  },
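The `forward` above fans the stem feature out into parallel branches and returns one feature map per branch. A minimal shape-check sketch, assuming the standard HRNetV2p-W32 `extra` configuration used in the mmdet configs (the `extra` dict and its key names are taken from those configs, not from this file, so treat them as an assumption):

```python
import torch
from mmdet.models.backbones.hrnet import HRNet

# Assumed W32 stage configuration; key names mirror the stage*_cfg lookups above.
extra = dict(
    stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
                num_blocks=(4, ), num_channels=(64, )),
    stage2=dict(num_modules=1, num_branches=2, block='BASIC',
                num_blocks=(4, 4), num_channels=(32, 64)),
    stage3=dict(num_modules=4, num_branches=3, block='BASIC',
                num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
    stage4=dict(num_modules=3, num_branches=4, block='BASIC',
                num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))

model = HRNet(extra=extra)
model.eval()
with torch.no_grad():
    outs = model(torch.rand(1, 3, 224, 224))
for out in outs:
    print(tuple(out.shape))
# Expected under this config: (1, 32, 56, 56), (1, 64, 28, 28),
# (1, 128, 14, 14), (1, 256, 7, 7)
```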
  {
    "path": "mmdet/models/backbones/mobilenet_v2.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom ..layers import InvertedResidual\nfrom ..utils import make_divisible\n\n\n@MODELS.register_module()\nclass MobileNetV2(BaseModule):\n    \"\"\"MobileNetV2 backbone.\n\n    Args:\n        widen_factor (float): Width multiplier, multiply number of\n            channels in each layer by this amount. Default: 1.0.\n        out_indices (Sequence[int], optional): Output from which stages.\n            Default: (1, 2, 4, 7).\n        frozen_stages (int): Stages to be frozen (all param fixed).\n            Default: -1, which means not freezing any parameters.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU6').\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Default: False.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    # Parameters to build layers. 4 parameters are needed to construct a\n    # layer, from left to right: expand_ratio, channel, num_blocks, stride.\n    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],\n                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],\n                     [6, 320, 1, 1]]\n\n    def __init__(self,\n                 widen_factor=1.,\n                 out_indices=(1, 2, 4, 7),\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU6'),\n                 norm_eval=False,\n                 with_cp=False,\n                 pretrained=None,\n                 init_cfg=None):\n        super(MobileNetV2, self).__init__(init_cfg)\n\n        self.pretrained = pretrained\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.widen_factor = widen_factor\n        self.out_indices = out_indices\n        if not set(out_indices).issubset(set(range(0, 8))):\n            raise ValueError('out_indices must be a subset of range'\n       
                      f'(0, 8). But received {out_indices}')\n\n        if frozen_stages not in range(-1, 8):\n            raise ValueError('frozen_stages must be in range(-1, 8). '\n                             f'But received {frozen_stages}')\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n\n        self.in_channels = make_divisible(32 * widen_factor, 8)\n\n        self.conv1 = ConvModule(\n            in_channels=3,\n            out_channels=self.in_channels,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            act_cfg=self.act_cfg)\n\n        self.layers = []\n\n        for i, layer_cfg in enumerate(self.arch_settings):\n            expand_ratio, channel, num_blocks, stride = layer_cfg\n            out_channels = make_divisible(channel * widen_factor, 8)\n            inverted_res_layer = self.make_layer(\n                out_channels=out_channels,\n                num_blocks=num_blocks,\n                stride=stride,\n                expand_ratio=expand_ratio)\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, inverted_res_layer)\n            self.layers.append(layer_name)\n\n        if widen_factor > 1.0:\n            self.out_channel = int(1280 * widen_factor)\n        else:\n            self.out_channel = 1280\n\n        layer = ConvModule(\n            in_channels=self.in_channels,\n            out_channels=self.out_channel,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            act_cfg=self.act_cfg)\n        self.add_module('conv2', layer)\n        self.layers.append('conv2')\n\n    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):\n        \"\"\"Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n        Args:\n            out_channels (int): out_channels of block.\n            num_blocks (int): number of blocks.\n            stride (int): stride of the first block. Default: 1\n            expand_ratio (int): Expand the number of channels of the\n                hidden layer in InvertedResidual by this ratio. 
Default: 6.\n        \"\"\"\n        layers = []\n        for i in range(num_blocks):\n            if i >= 1:\n                stride = 1\n            layers.append(\n                InvertedResidual(\n                    self.in_channels,\n                    out_channels,\n                    mid_channels=int(round(self.in_channels * expand_ratio)),\n                    stride=stride,\n                    with_expand_conv=expand_ratio != 1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg,\n                    with_cp=self.with_cp))\n            self.in_channels = out_channels\n\n        return nn.Sequential(*layers)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for param in self.conv1.parameters():\n                param.requires_grad = False\n        for i in range(1, self.frozen_stages + 1):\n            layer = getattr(self, f'layer{i}')\n            layer.eval()\n            for param in layer.parameters():\n                param.requires_grad = False\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        outs = []\n        for i, layer_name in enumerate(self.layers):\n            layer = getattr(self, layer_name)\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep normalization layer\n        frozen.\"\"\"\n        super(MobileNetV2, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval have effect on BatchNorm only\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n"
  },
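As a quick sanity check of the default `out_indices=(1, 2, 4, 7)` above: indices 1, 2 and 4 pick `layer2`, `layer3` and `layer5`, and index 7 picks the final `conv2` projection. A minimal sketch; the printed shapes assume the default `widen_factor=1.0` and a 224x224 input:

```python
import torch
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2

model = MobileNetV2(widen_factor=1.0, out_indices=(1, 2, 4, 7))
model.eval()
with torch.no_grad():
    outs = model(torch.rand(1, 3, 224, 224))
for out in outs:
    print(tuple(out.shape))
# Expected: (1, 24, 56, 56), (1, 32, 28, 28), (1, 96, 14, 14), (1, 1280, 7, 7)
```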
  {
    "path": "mmdet/models/backbones/pvt.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer\nfrom mmcv.cnn.bricks.drop import build_dropout\nfrom mmcv.cnn.bricks.transformer import MultiheadAttention\nfrom mmengine.logging import MMLogger\nfrom mmengine.model import (BaseModule, ModuleList, Sequential, constant_init,\n                            normal_init, trunc_normal_init)\nfrom mmengine.model.weight_init import trunc_normal_\nfrom mmengine.runner.checkpoint import CheckpointLoader, load_state_dict\nfrom torch.nn.modules.utils import _pair as to_2tuple\n\nfrom mmdet.registry import MODELS\nfrom ..layers import PatchEmbed, nchw_to_nlc, nlc_to_nchw\n\n\nclass MixFFN(BaseModule):\n    \"\"\"An implementation of MixFFN of PVT.\n\n    The differences between MixFFN & FFN:\n        1. Use 1X1 Conv to replace Linear layer.\n        2. Introduce 3X3 Depth-wise Conv to encode positional information.\n\n    Args:\n        embed_dims (int): The feature dimension. Same as\n            `MultiheadAttention`.\n        feedforward_channels (int): The hidden dimension of FFNs.\n        act_cfg (dict, optional): The activation config for FFNs.\n            Default: dict(type='GELU').\n        ffn_drop (float, optional): Probability of an element to be\n            zeroed in FFN. Default 0.0.\n        dropout_layer (obj:`ConfigDict`): The dropout_layer used\n            when adding the shortcut.\n            Default: None.\n        use_conv (bool): If True, add 3x3 DWConv between two Linear layers.\n            Defaults: False.\n        init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 feedforward_channels,\n                 act_cfg=dict(type='GELU'),\n                 ffn_drop=0.,\n                 dropout_layer=None,\n                 use_conv=False,\n                 init_cfg=None):\n        super(MixFFN, self).__init__(init_cfg=init_cfg)\n\n        self.embed_dims = embed_dims\n        self.feedforward_channels = feedforward_channels\n        self.act_cfg = act_cfg\n        activate = build_activation_layer(act_cfg)\n\n        in_channels = embed_dims\n        fc1 = Conv2d(\n            in_channels=in_channels,\n            out_channels=feedforward_channels,\n            kernel_size=1,\n            stride=1,\n            bias=True)\n        if use_conv:\n            # 3x3 depth wise conv to provide positional encode information\n            dw_conv = Conv2d(\n                in_channels=feedforward_channels,\n                out_channels=feedforward_channels,\n                kernel_size=3,\n                stride=1,\n                padding=(3 - 1) // 2,\n                bias=True,\n                groups=feedforward_channels)\n        fc2 = Conv2d(\n            in_channels=feedforward_channels,\n            out_channels=in_channels,\n            kernel_size=1,\n            stride=1,\n            bias=True)\n        drop = nn.Dropout(ffn_drop)\n        layers = [fc1, activate, drop, fc2, drop]\n        if use_conv:\n            layers.insert(1, dw_conv)\n        self.layers = Sequential(*layers)\n        self.dropout_layer = build_dropout(\n            dropout_layer) if dropout_layer else torch.nn.Identity()\n\n    def forward(self, x, hw_shape, identity=None):\n        
out = nlc_to_nchw(x, hw_shape)\n        out = self.layers(out)\n        out = nchw_to_nlc(out)\n        if identity is None:\n            identity = x\n        return identity + self.dropout_layer(out)\n\n\nclass SpatialReductionAttention(MultiheadAttention):\n    \"\"\"An implementation of Spatial Reduction Attention of PVT.\n\n    This module is modified from MultiheadAttention which is a module from\n    mmcv.cnn.bricks.transformer.\n\n    Args:\n        embed_dims (int): The embedding dimension.\n        num_heads (int): Parallel attention heads.\n        attn_drop (float): A Dropout layer on attn_output_weights.\n            Default: 0.0.\n        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.\n            Default: 0.0.\n        dropout_layer (obj:`ConfigDict`): The dropout_layer used\n            when adding the shortcut. Default: None.\n        batch_first (bool): Key, Query and Value are shape of\n            (batch, n, embed_dim)\n            or (n, batch, embed_dim). Default: False.\n        qkv_bias (bool): enable bias for qkv if True. Default: True.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        sr_ratio (int): The ratio of spatial reduction of Spatial Reduction\n            Attention of PVT. Default: 1.\n        init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 attn_drop=0.,\n                 proj_drop=0.,\n                 dropout_layer=None,\n                 batch_first=True,\n                 qkv_bias=True,\n                 norm_cfg=dict(type='LN'),\n                 sr_ratio=1,\n                 init_cfg=None):\n        super().__init__(\n            embed_dims,\n            num_heads,\n            attn_drop,\n            proj_drop,\n            batch_first=batch_first,\n            dropout_layer=dropout_layer,\n            bias=qkv_bias,\n            init_cfg=init_cfg)\n\n        self.sr_ratio = sr_ratio\n        if sr_ratio > 1:\n            self.sr = Conv2d(\n                in_channels=embed_dims,\n                out_channels=embed_dims,\n                kernel_size=sr_ratio,\n                stride=sr_ratio)\n            # The ret[0] of build_norm_layer is norm name.\n            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa\n        from mmdet import digit_version, mmcv_version\n        if mmcv_version < digit_version('1.3.17'):\n            warnings.warn('The legacy version of forward function in'\n                          'SpatialReductionAttention is deprecated in'\n                          'mmcv>=1.3.17 and will no longer support in the'\n                          'future. 
Please upgrade your mmcv.')\n            self.forward = self.legacy_forward\n\n    def forward(self, x, hw_shape, identity=None):\n\n        x_q = x\n        if self.sr_ratio > 1:\n            x_kv = nlc_to_nchw(x, hw_shape)\n            x_kv = self.sr(x_kv)\n            x_kv = nchw_to_nlc(x_kv)\n            x_kv = self.norm(x_kv)\n        else:\n            x_kv = x\n\n        if identity is None:\n            identity = x_q\n\n        # Because the dataflow('key', 'query', 'value') of\n        # ``torch.nn.MultiheadAttention`` is (num_queries, batch,\n        # embed_dims), We should adjust the shape of dataflow from\n        # batch_first (batch, num_queries, embed_dims) to num_queries_first\n        # (num_queries ,batch, embed_dims), and recover ``attn_output``\n        # from num_queries_first to batch_first.\n        if self.batch_first:\n            x_q = x_q.transpose(0, 1)\n            x_kv = x_kv.transpose(0, 1)\n\n        out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]\n\n        if self.batch_first:\n            out = out.transpose(0, 1)\n\n        return identity + self.dropout_layer(self.proj_drop(out))\n\n    def legacy_forward(self, x, hw_shape, identity=None):\n        \"\"\"multi head attention forward in mmcv version < 1.3.17.\"\"\"\n        x_q = x\n        if self.sr_ratio > 1:\n            x_kv = nlc_to_nchw(x, hw_shape)\n            x_kv = self.sr(x_kv)\n            x_kv = nchw_to_nlc(x_kv)\n            x_kv = self.norm(x_kv)\n        else:\n            x_kv = x\n\n        if identity is None:\n            identity = x_q\n\n        out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]\n\n        return identity + self.dropout_layer(self.proj_drop(out))\n\n\nclass PVTEncoderLayer(BaseModule):\n    \"\"\"Implements one encoder layer in PVT.\n\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        drop_rate (float): Probability of an element to be zeroed.\n            after the feed forward layer. Default: 0.0.\n        attn_drop_rate (float): The drop out rate for attention layer.\n            Default: 0.0.\n        drop_path_rate (float): stochastic depth rate. Default: 0.0.\n        qkv_bias (bool): enable bias for qkv if True.\n            Default: True.\n        act_cfg (dict): The activation config for FFNs.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        sr_ratio (int): The ratio of spatial reduction of Spatial Reduction\n            Attention of PVT. 
Default: 1.\n        use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.\n            Default: False.\n        init_cfg (dict, optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 qkv_bias=True,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 sr_ratio=1,\n                 use_conv_ffn=False,\n                 init_cfg=None):\n        super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg)\n\n        # The ret[0] of build_norm_layer is norm name.\n        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        self.attn = SpatialReductionAttention(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            attn_drop=attn_drop_rate,\n            proj_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            qkv_bias=qkv_bias,\n            norm_cfg=norm_cfg,\n            sr_ratio=sr_ratio)\n\n        # The ret[0] of build_norm_layer is norm name.\n        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        self.ffn = MixFFN(\n            embed_dims=embed_dims,\n            feedforward_channels=feedforward_channels,\n            ffn_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            use_conv=use_conv_ffn,\n            act_cfg=act_cfg)\n\n    def forward(self, x, hw_shape):\n        x = self.attn(self.norm1(x), hw_shape, identity=x)\n        x = self.ffn(self.norm2(x), hw_shape, identity=x)\n\n        return x\n\n\nclass AbsolutePositionEmbedding(BaseModule):\n    \"\"\"An implementation of the absolute position embedding in PVT.\n\n    Args:\n        pos_shape (int): The shape of the absolute position embedding.\n        pos_dim (int): The dimension of the absolute position embedding.\n        drop_rate (float): Probability of an element to be zeroed.\n            Default: 0.0.\n    \"\"\"\n\n    def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        if isinstance(pos_shape, int):\n            pos_shape = to_2tuple(pos_shape)\n        elif isinstance(pos_shape, tuple):\n            if len(pos_shape) == 1:\n                pos_shape = to_2tuple(pos_shape[0])\n            assert len(pos_shape) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pos_shape)}'\n        self.pos_shape = pos_shape\n        self.pos_dim = pos_dim\n\n        self.pos_embed = nn.Parameter(\n            torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim))\n        self.drop = nn.Dropout(p=drop_rate)\n\n    def init_weights(self):\n        trunc_normal_(self.pos_embed, std=0.02)\n\n    def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):\n        \"\"\"Resize pos_embed weights.\n\n        Resize pos_embed using bilinear interpolate method.\n\n        Args:\n            pos_embed (torch.Tensor): Position embedding weights.\n            input_shape (tuple): Tuple for (downsampled input image height,\n                downsampled input image width).\n            mode (str): Algorithm used for upsampling:\n                ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n         
       ``'trilinear'``. Default: ``'bilinear'``.\n\n        Return:\n            torch.Tensor: The resized pos_embed of shape [B, L_new, C].\n        \"\"\"\n        assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'\n        pos_h, pos_w = self.pos_shape\n        pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]\n        pos_embed_weight = pos_embed_weight.reshape(\n            1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous()\n        pos_embed_weight = F.interpolate(\n            pos_embed_weight, size=input_shape, mode=mode)\n        pos_embed_weight = torch.flatten(pos_embed_weight,\n                                         2).transpose(1, 2).contiguous()\n        pos_embed = pos_embed_weight\n\n        return pos_embed\n\n    def forward(self, x, hw_shape, mode='bilinear'):\n        pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode)\n        return self.drop(x + pos_embed)\n\n\n@MODELS.register_module()\nclass PyramidVisionTransformer(BaseModule):\n    \"\"\"Pyramid Vision Transformer (PVT)\n\n    Implementation of `Pyramid Vision Transformer: A Versatile Backbone for\n    Dense Prediction without Convolutions\n    <https://arxiv.org/pdf/2102.12122.pdf>`_.\n\n    Args:\n        pretrain_img_size (int | tuple[int]): The size of input image when\n            pretrain. Defaults: 224.\n        in_channels (int): Number of input channels. Default: 3.\n        embed_dims (int): Embedding dimension. Default: 64.\n        num_stags (int): The num of stages. Default: 4.\n        num_layers (Sequence[int]): The layer number of each transformer encode\n            layer. Default: [3, 4, 6, 3].\n        num_heads (Sequence[int]): The attention heads of each transformer\n            encode layer. Default: [1, 2, 5, 8].\n        patch_sizes (Sequence[int]): The patch_size of each patch embedding.\n            Default: [4, 2, 2, 2].\n        strides (Sequence[int]): The stride of each patch embedding.\n            Default: [4, 2, 2, 2].\n        paddings (Sequence[int]): The padding of each patch embedding.\n            Default: [0, 0, 0, 0].\n        sr_ratios (Sequence[int]): The spatial reduction rate of each\n            transformer encode layer. Default: [8, 4, 2, 1].\n        out_indices (Sequence[int] | int): Output from which stages.\n            Default: (0, 1, 2, 3).\n        mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the\n            embedding dim of each transformer encode layer.\n            Default: [8, 8, 4, 4].\n        qkv_bias (bool): Enable bias for qkv if True. Default: True.\n        drop_rate (float): Probability of an element to be zeroed.\n            Default 0.0.\n        attn_drop_rate (float): The drop out rate for attention layer.\n            Default 0.0.\n        drop_path_rate (float): stochastic depth rate. Default 0.1.\n        use_abs_pos_embed (bool): If True, add absolute position embedding to\n            the patch embedding. Defaults: True.\n        use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.\n            Default: False.\n        act_cfg (dict): The activation config for FFNs.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        pretrained (str, optional): model pretrained path. Default: None.\n        convert_weights (bool): The flag indicates whether the\n            pre-trained model is from the original repo. 
We may need\n            to convert some keys to make it compatible.\n            Default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=224,\n                 in_channels=3,\n                 embed_dims=64,\n                 num_stages=4,\n                 num_layers=[3, 4, 6, 3],\n                 num_heads=[1, 2, 5, 8],\n                 patch_sizes=[4, 2, 2, 2],\n                 strides=[4, 2, 2, 2],\n                 paddings=[0, 0, 0, 0],\n                 sr_ratios=[8, 4, 2, 1],\n                 out_indices=(0, 1, 2, 3),\n                 mlp_ratios=[8, 8, 4, 4],\n                 qkv_bias=True,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.1,\n                 use_abs_pos_embed=True,\n                 norm_after_stage=False,\n                 use_conv_ffn=False,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN', eps=1e-6),\n                 pretrained=None,\n                 convert_weights=True,\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        self.convert_weights = convert_weights\n        if isinstance(pretrain_img_size, int):\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n        elif isinstance(pretrain_img_size, tuple):\n            if len(pretrain_img_size) == 1:\n                pretrain_img_size = to_2tuple(pretrain_img_size[0])\n            assert len(pretrain_img_size) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pretrain_img_size)}'\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be setting at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = init_cfg\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.embed_dims = embed_dims\n\n        self.num_stages = num_stages\n        self.num_layers = num_layers\n        self.num_heads = num_heads\n        self.patch_sizes = patch_sizes\n        self.strides = strides\n        self.sr_ratios = sr_ratios\n        assert num_stages == len(num_layers) == len(num_heads) \\\n               == len(patch_sizes) == len(strides) == len(sr_ratios)\n\n        self.out_indices = out_indices\n        assert max(out_indices) < self.num_stages\n        self.pretrained = pretrained\n\n        # transformer encoder\n        dpr = [\n            x.item()\n            for x in torch.linspace(0, drop_path_rate, sum(num_layers))\n        ]  # stochastic num_layer decay rule\n\n        cur = 0\n        self.layers = ModuleList()\n        for i, num_layer in enumerate(num_layers):\n            embed_dims_i = embed_dims * num_heads[i]\n            patch_embed = PatchEmbed(\n                in_channels=in_channels,\n                embed_dims=embed_dims_i,\n                kernel_size=patch_sizes[i],\n                stride=strides[i],\n                padding=paddings[i],\n                bias=True,\n                norm_cfg=norm_cfg)\n\n            layers = ModuleList()\n            if use_abs_pos_embed:\n                pos_shape 
= pretrain_img_size // np.prod(patch_sizes[:i + 1])\n                pos_embed = AbsolutePositionEmbedding(\n                    pos_shape=pos_shape,\n                    pos_dim=embed_dims_i,\n                    drop_rate=drop_rate)\n                layers.append(pos_embed)\n            layers.extend([\n                PVTEncoderLayer(\n                    embed_dims=embed_dims_i,\n                    num_heads=num_heads[i],\n                    feedforward_channels=mlp_ratios[i] * embed_dims_i,\n                    drop_rate=drop_rate,\n                    attn_drop_rate=attn_drop_rate,\n                    drop_path_rate=dpr[cur + idx],\n                    qkv_bias=qkv_bias,\n                    act_cfg=act_cfg,\n                    norm_cfg=norm_cfg,\n                    sr_ratio=sr_ratios[i],\n                    use_conv_ffn=use_conv_ffn) for idx in range(num_layer)\n            ])\n            in_channels = embed_dims_i\n            # The ret[0] of build_norm_layer is norm name.\n            if norm_after_stage:\n                norm = build_norm_layer(norm_cfg, embed_dims_i)[1]\n            else:\n                norm = nn.Identity()\n            self.layers.append(ModuleList([patch_embed, layers, norm]))\n            cur += num_layer\n\n    def init_weights(self):\n        logger = MMLogger.get_current_instance()\n        if self.init_cfg is None:\n            logger.warn(f'No pre-trained weights for '\n                        f'{self.__class__.__name__}, '\n                        f'training start from scratch')\n            for m in self.modules():\n                if isinstance(m, nn.Linear):\n                    trunc_normal_init(m, std=.02, bias=0.)\n                elif isinstance(m, nn.LayerNorm):\n                    constant_init(m, 1.0)\n                elif isinstance(m, nn.Conv2d):\n                    fan_out = m.kernel_size[0] * m.kernel_size[\n                        1] * m.out_channels\n                    fan_out //= m.groups\n                    normal_init(m, 0, math.sqrt(2.0 / fan_out))\n                elif isinstance(m, AbsolutePositionEmbedding):\n                    m.init_weights()\n        else:\n            assert 'checkpoint' in self.init_cfg, f'Only support ' \\\n                                                  f'specify `Pretrained` in ' \\\n                                                  f'`init_cfg` in ' \\\n                                                  f'{self.__class__.__name__} '\n            checkpoint = CheckpointLoader.load_checkpoint(\n                self.init_cfg.checkpoint, logger=logger, map_location='cpu')\n            logger.warn(f'Load pre-trained model for '\n                        f'{self.__class__.__name__} from original repo')\n            if 'state_dict' in checkpoint:\n                state_dict = checkpoint['state_dict']\n            elif 'model' in checkpoint:\n                state_dict = checkpoint['model']\n            else:\n                state_dict = checkpoint\n            if self.convert_weights:\n                # Because pvt backbones are not supported by mmcls,\n                # so we need to convert pre-trained weights to match this\n                # implementation.\n                state_dict = pvt_convert(state_dict)\n            load_state_dict(self, state_dict, strict=False, logger=logger)\n\n    def forward(self, x):\n        outs = []\n\n        for i, layer in enumerate(self.layers):\n            x, hw_shape = layer[0](x)\n\n            for block in layer[1]:\n                x = block(x, 
hw_shape)\n            x = layer[2](x)\n            x = nlc_to_nchw(x, hw_shape)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return outs\n\n\n@MODELS.register_module()\nclass PyramidVisionTransformerV2(PyramidVisionTransformer):\n    \"\"\"Implementation of `PVTv2: Improved Baselines with Pyramid Vision\n    Transformer <https://arxiv.org/pdf/2106.13797.pdf>`_.\"\"\"\n\n    def __init__(self, **kwargs):\n        super(PyramidVisionTransformerV2, self).__init__(\n            patch_sizes=[7, 3, 3, 3],\n            paddings=[3, 1, 1, 1],\n            use_abs_pos_embed=False,\n            norm_after_stage=True,\n            use_conv_ffn=True,\n            **kwargs)\n\n\ndef pvt_convert(ckpt):\n    new_ckpt = OrderedDict()\n    # Process the concat between q linear weights and kv linear weights\n    use_abs_pos_embed = False\n    use_conv_ffn = False\n    for k in ckpt.keys():\n        if k.startswith('pos_embed'):\n            use_abs_pos_embed = True\n        if k.find('dwconv') >= 0:\n            use_conv_ffn = True\n    for k, v in ckpt.items():\n        if k.startswith('head'):\n            continue\n        if k.startswith('norm.'):\n            continue\n        if k.startswith('cls_token'):\n            continue\n        if k.startswith('pos_embed'):\n            stage_i = int(k.replace('pos_embed', ''))\n            new_k = k.replace(f'pos_embed{stage_i}',\n                              f'layers.{stage_i - 1}.1.0.pos_embed')\n            if stage_i == 4 and v.size(1) == 50:  # 1 (cls token) + 7 * 7\n                new_v = v[:, 1:, :]  # remove cls token\n            else:\n                new_v = v\n        elif k.startswith('patch_embed'):\n            stage_i = int(k.split('.')[0].replace('patch_embed', ''))\n            new_k = k.replace(f'patch_embed{stage_i}',\n                              f'layers.{stage_i - 1}.0')\n            new_v = v\n            if 'proj.' in new_k:\n                new_k = new_k.replace('proj.', 'projection.')\n        elif k.startswith('block'):\n            stage_i = int(k.split('.')[0].replace('block', ''))\n            layer_i = int(k.split('.')[1])\n            new_layer_i = layer_i + use_abs_pos_embed\n            new_k = k.replace(f'block{stage_i}.{layer_i}',\n                              f'layers.{stage_i - 1}.1.{new_layer_i}')\n            new_v = v\n            if 'attn.q.' in new_k:\n                sub_item_k = k.replace('q.', 'kv.')\n                new_k = new_k.replace('q.', 'attn.in_proj_')\n                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)\n            elif 'attn.kv.' in new_k:\n                continue\n            elif 'attn.proj.' in new_k:\n                new_k = new_k.replace('proj.', 'attn.out_proj.')\n            elif 'attn.sr.' in new_k:\n                new_k = new_k.replace('sr.', 'sr.')\n            elif 'mlp.' 
in new_k:\n                string = f'{new_k}-'\n                new_k = new_k.replace('mlp.', 'ffn.layers.')\n                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:\n                    new_v = v.reshape((*v.shape, 1, 1))\n                new_k = new_k.replace('fc1.', '0.')\n                new_k = new_k.replace('dwconv.dwconv.', '1.')\n                if use_conv_ffn:\n                    new_k = new_k.replace('fc2.', '4.')\n                else:\n                    new_k = new_k.replace('fc2.', '3.')\n                string += f'{new_k} {v.shape}-{new_v.shape}'\n        elif k.startswith('norm'):\n            stage_i = int(k[4])\n            new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')\n            new_v = v\n        else:\n            new_k = k\n            new_v = v\n        new_ckpt[new_k] = new_v\n\n    return new_ckpt\n"
  },
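For reference, the defaults above (`embed_dims=64`, `num_heads=[1, 2, 5, 8]`, `strides=[4, 2, 2, 2]`) give per-stage widths of 64/128/320/512 channels at strides 4/8/16/32, since each stage uses `embed_dims * num_heads[i]`. A minimal forward-shape sketch under those defaults:

```python
import torch
from mmdet.models.backbones.pvt import PyramidVisionTransformer

model = PyramidVisionTransformer()  # default PVT configuration
model.eval()
with torch.no_grad():
    outs = model(torch.rand(1, 3, 224, 224))
for out in outs:
    print(tuple(out.shape))
# Expected: (1, 64, 56, 56), (1, 128, 28, 28), (1, 320, 14, 14), (1, 512, 7, 7)
```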
  {
    "path": "mmdet/models/backbones/regnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch.nn as nn\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom mmdet.registry import MODELS\nfrom .resnet import ResNet\nfrom .resnext import Bottleneck\n\n\n@MODELS.register_module()\nclass RegNet(ResNet):\n    \"\"\"RegNet backbone.\n\n    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .\n\n    Args:\n        arch (dict): The parameter of RegNets.\n\n            - w0 (int): initial width\n            - wa (float): slope of width\n            - wm (float): quantization parameter to quantize the width\n            - depth (int): depth of the backbone\n            - group_w (int): width of group\n            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        base_channels (int): Base channels after stem layer.\n        in_channels (int): Number of input image channels. Default: 3.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n            not freezing any parameters.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import RegNet\n        >>> import torch\n        >>> self = RegNet(\n                arch=dict(\n                    w0=88,\n                    wa=26.31,\n                    wm=2.25,\n                    group_w=48,\n                    depth=25,\n                    bot_mul=1.0))\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     
print(tuple(level_out.shape))\n        (1, 96, 8, 8)\n        (1, 192, 4, 4)\n        (1, 432, 2, 2)\n        (1, 1008, 1, 1)\n    \"\"\"\n    arch_settings = {\n        'regnetx_400mf':\n        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),\n        'regnetx_800mf':\n        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),\n        'regnetx_1.6gf':\n        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),\n        'regnetx_3.2gf':\n        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),\n        'regnetx_4.0gf':\n        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),\n        'regnetx_6.4gf':\n        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),\n        'regnetx_8.0gf':\n        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),\n        'regnetx_12gf':\n        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),\n    }\n\n    def __init__(self,\n                 arch,\n                 in_channels=3,\n                 stem_channels=32,\n                 base_channels=32,\n                 strides=(2, 2, 2, 2),\n                 dilations=(1, 1, 1, 1),\n                 out_indices=(0, 1, 2, 3),\n                 style='pytorch',\n                 deep_stem=False,\n                 avg_down=False,\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 dcn=None,\n                 stage_with_dcn=(False, False, False, False),\n                 plugins=None,\n                 with_cp=False,\n                 zero_init_residual=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResNet, self).__init__(init_cfg)\n\n        # Generate RegNet parameters first\n        if isinstance(arch, str):\n            assert arch in self.arch_settings, \\\n                f'\"arch\": \"{arch}\" is not one of the' \\\n                ' arch_settings'\n            arch = self.arch_settings[arch]\n        elif not isinstance(arch, dict):\n            raise ValueError('Expect \"arch\" to be either a string '\n                             f'or a dict, got {type(arch)}')\n\n        widths, num_stages = self.generate_regnet(\n            arch['w0'],\n            arch['wa'],\n            arch['wm'],\n            arch['depth'],\n        )\n        # Convert to per stage format\n        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)\n        # Generate group widths and bot muls\n        group_widths = [arch['group_w'] for _ in range(num_stages)]\n        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]\n        # Adjust the compatibility of stage_widths and group_widths\n        stage_widths, group_widths = self.adjust_width_group(\n            stage_widths, self.bottleneck_ratio, group_widths)\n\n        # Group params by stage\n        self.stage_widths = stage_widths\n        self.group_widths = group_widths\n        self.depth = sum(stage_blocks)\n        self.stem_channels = stem_channels\n        self.base_channels = base_channels\n        self.num_stages = num_stages\n        assert num_stages >= 1 and num_stages <= 4\n        self.strides = strides\n        self.dilations = dilations\n        assert len(strides) == len(dilations) == num_stages\n        self.out_indices = out_indices\n        assert max(out_indices) < num_stages\n        self.style = style\n 
       self.deep_stem = deep_stem\n        self.avg_down = avg_down\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.with_cp = with_cp\n        self.norm_eval = norm_eval\n        self.dcn = dcn\n        self.stage_with_dcn = stage_with_dcn\n        if dcn is not None:\n            assert len(stage_with_dcn) == num_stages\n        self.plugins = plugins\n        self.zero_init_residual = zero_init_residual\n        self.block = Bottleneck\n        expansion_bak = self.block.expansion\n        self.block.expansion = 1\n        self.stage_blocks = stage_blocks[:num_stages]\n\n        self._make_stem_layer(in_channels, stem_channels)\n\n        block_init_cfg = None\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n                if self.zero_init_residual:\n                    block_init_cfg = dict(\n                        type='Constant', val=0, override=dict(name='norm3'))\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.inplanes = stem_channels\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = self.strides[i]\n            dilation = self.dilations[i]\n            group_width = self.group_widths[i]\n            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))\n            stage_groups = width // group_width\n\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            if self.plugins is not None:\n                stage_plugins = self.make_stage_plugins(self.plugins, i)\n            else:\n                stage_plugins = None\n\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=self.stage_widths[i],\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=self.with_cp,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                dcn=dcn,\n                plugins=stage_plugins,\n                groups=stage_groups,\n                base_width=group_width,\n                base_channels=self.stage_widths[i],\n                init_cfg=block_init_cfg)\n            self.inplanes = self.stage_widths[i]\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n        self.feat_dim = stage_widths[-1]\n        self.block.expansion = expansion_bak\n\n    def _make_stem_layer(self, in_channels, base_channels):\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            in_channels,\n            
base_channels,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, base_channels, postfix=1)\n        self.add_module(self.norm1_name, norm1)\n        self.relu = nn.ReLU(inplace=True)\n\n    def generate_regnet(self,\n                        initial_width,\n                        width_slope,\n                        width_parameter,\n                        depth,\n                        divisor=8):\n        \"\"\"Generates per block width from RegNet parameters.\n\n        Args:\n            initial_width ([int]): Initial width of the backbone\n            width_slope ([float]): Slope of the quantized linear function\n            width_parameter ([int]): Parameter used to quantize the width.\n            depth ([int]): Depth of the backbone.\n            divisor (int, optional): The divisor of channels. Defaults to 8.\n\n        Returns:\n            list, int: return a list of widths of each stage and the number \\\n                of stages\n        \"\"\"\n        assert width_slope >= 0\n        assert initial_width > 0\n        assert width_parameter > 1\n        assert initial_width % divisor == 0\n        widths_cont = np.arange(depth) * width_slope + initial_width\n        ks = np.round(\n            np.log(widths_cont / initial_width) / np.log(width_parameter))\n        widths = initial_width * np.power(width_parameter, ks)\n        widths = np.round(np.divide(widths, divisor)) * divisor\n        num_stages = len(np.unique(widths))\n        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()\n        return widths, num_stages\n\n    @staticmethod\n    def quantize_float(number, divisor):\n        \"\"\"Converts a float to closest non-zero int divisible by divisor.\n\n        Args:\n            number (int): Original number to be quantized.\n            divisor (int): Divisor used to quantize the number.\n\n        Returns:\n            int: quantized number that is divisible by devisor.\n        \"\"\"\n        return int(round(number / divisor) * divisor)\n\n    def adjust_width_group(self, widths, bottleneck_ratio, groups):\n        \"\"\"Adjusts the compatibility of widths and groups.\n\n        Args:\n            widths (list[int]): Width of each stage.\n            bottleneck_ratio (float): Bottleneck ratio.\n            groups (int): number of groups in each stage\n\n        Returns:\n            tuple(list): The adjusted widths and groups of each stage.\n        \"\"\"\n        bottleneck_width = [\n            int(w * b) for w, b in zip(widths, bottleneck_ratio)\n        ]\n        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]\n        bottleneck_width = [\n            self.quantize_float(w_bot, g)\n            for w_bot, g in zip(bottleneck_width, groups)\n        ]\n        widths = [\n            int(w_bot / b)\n            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)\n        ]\n        return widths, groups\n\n    def get_stages_from_blocks(self, widths):\n        \"\"\"Gets widths/stage_blocks of network at each stage.\n\n        Args:\n            widths (list[int]): Width in each stage.\n\n        Returns:\n            tuple(list): width and depth of each stage\n        \"\"\"\n        width_diff = [\n            width != width_prev\n            for width, width_prev in zip(widths + [0], [0] + widths)\n        ]\n        stage_widths = [\n            width for width, diff in 
zip(widths, width_diff[:-1]) if diff\n        ]\n        stage_blocks = np.diff([\n            depth for depth, diff in zip(range(len(width_diff)), width_diff)\n            if diff\n        ]).tolist()\n        return stage_widths, stage_blocks\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        x = self.norm1(x)\n        x = self.relu(x)\n\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            x = res_layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
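To make the width-quantization arithmetic in `generate_regnet` / `adjust_width_group` concrete, here is a small numeric sketch for the `regnetx_3.2gf` entry (w0=88, wa=26.31, wm=2.25, depth=25, group_w=48, bot_mul=1.0); it reproduces the stage widths shown in the class docstring example. It is a simplified mirror of the methods above, not a replacement for them:

```python
import numpy as np

w0, wa, wm, depth, divisor, group_w = 88, 26.31, 2.25, 25, 8, 48

# Per-block widths on a quantized log scale (as in generate_regnet).
widths_cont = np.arange(depth) * wa + w0
ks = np.round(np.log(widths_cont / w0) / np.log(wm))
widths = (np.round(w0 * np.power(wm, ks) / divisor) * divisor).astype(int)

# Collapse equal widths into stages; widths are non-decreasing for these
# parameters, so a sorted set suffices for this sketch.
stage_widths = sorted(set(widths.tolist()))                       # [88, 200, 448, 1000]
stage_blocks = [int((widths == w).sum()) for w in stage_widths]   # [2, 6, 15, 2]

# Make every stage width divisible by the group width (adjust_width_group
# with bot_mul=1.0, i.e. quantize_float per stage).
stage_widths = [int(round(w / group_w)) * group_w for w in stage_widths]
print(stage_widths, stage_blocks)  # [96, 192, 432, 1008] [2, 6, 15, 2]
```

The adjusted widths match the `(1, 96, ...)`, `(1, 192, ...)`, `(1, 432, ...)`, `(1, 1008, ...)` outputs in the docstring example above.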
  {
    "path": "mmdet/models/backbones/res2net.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.model import Sequential\n\nfrom mmdet.registry import MODELS\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottle2neck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 scales=4,\n                 base_width=26,\n                 base_channels=64,\n                 stage_type='normal',\n                 **kwargs):\n        \"\"\"Bottle2neck block for Res2Net.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)\n        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'\n        width = int(math.floor(self.planes * (base_width / base_channels)))\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width * scales, postfix=1)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width * scales,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n\n        if stage_type == 'stage' and self.conv2_stride != 1:\n            self.pool = nn.AvgPool2d(\n                kernel_size=3, stride=self.conv2_stride, padding=1)\n        convs = []\n        bns = []\n\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            for i in range(scales - 1):\n                convs.append(\n                    build_conv_layer(\n                        self.conv_cfg,\n                        width,\n                        width,\n                        kernel_size=3,\n                        stride=self.conv2_stride,\n                        padding=self.dilation,\n                        dilation=self.dilation,\n                        bias=False))\n                bns.append(\n                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\n            self.convs = nn.ModuleList(convs)\n            self.bns = nn.ModuleList(bns)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            for i in range(scales - 1):\n                convs.append(\n                    build_conv_layer(\n                        self.dcn,\n                        width,\n                        width,\n                        kernel_size=3,\n                        stride=self.conv2_stride,\n                        padding=self.dilation,\n                        dilation=self.dilation,\n                        bias=False))\n                bns.append(\n                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\n            self.convs = nn.ModuleList(convs)\n            self.bns = nn.ModuleList(bns)\n\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width * scales,\n            self.planes * self.expansion,\n            kernel_size=1,\n           
 bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        self.stage_type = stage_type\n        self.scales = scales\n        self.width = width\n        delattr(self, 'conv2')\n        delattr(self, self.norm2_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            spx = torch.split(out, self.width, 1)\n            sp = self.convs[0](spx[0].contiguous())\n            sp = self.relu(self.bns[0](sp))\n            out = sp\n            for i in range(1, self.scales - 1):\n                if self.stage_type == 'stage':\n                    sp = spx[i]\n                else:\n                    sp = sp + spx[i]\n                sp = self.convs[i](sp.contiguous())\n                sp = self.relu(self.bns[i](sp))\n                out = torch.cat((out, sp), 1)\n\n            if self.stage_type == 'normal' or self.conv2_stride == 1:\n                out = torch.cat((out, spx[self.scales - 1]), 1)\n            elif self.stage_type == 'stage':\n                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\nclass Res2Layer(Sequential):\n    \"\"\"Res2Layer to build Res2Net style backbone.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Default: 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottle2neck. Default: False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='BN')\n        scales (int): Scales used in Res2Net. Default: 4\n        base_width (int): Basic width of each scale. 
Default: 26\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 inplanes,\n                 planes,\n                 num_blocks,\n                 stride=1,\n                 avg_down=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 scales=4,\n                 base_width=26,\n                 **kwargs):\n        self.block = block\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.AvgPool2d(\n                    kernel_size=stride,\n                    stride=stride,\n                    ceil_mode=True,\n                    count_include_pad=False),\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=1,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1],\n            )\n\n        layers = []\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride,\n                downsample=downsample,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                scales=scales,\n                base_width=base_width,\n                stage_type='stage',\n                **kwargs))\n        inplanes = planes * block.expansion\n        for i in range(1, num_blocks):\n            layers.append(\n                block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    scales=scales,\n                    base_width=base_width,\n                    **kwargs))\n        super(Res2Layer, self).__init__(*layers)\n\n\n@MODELS.register_module()\nclass Res2Net(ResNet):\n    \"\"\"Res2Net backbone.\n\n    Args:\n        scales (int): Scales used in Res2Net. Default: 4\n        base_width (int): Basic width of each scale. Default: 26\n        depth (int): Depth of res2net, from {50, 101, 152}.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Res2net stages. Default: 4.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottle2neck.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). 
Note: Effect on Batch Norm\n            and its variants only.\n        plugins (list[dict]): List of plugins for stages, each dict contains:\n\n            - cfg (dict, required): Cfg dict to build plugin.\n            - position (str, required): Position inside block to insert\n              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\n            - stages (tuple[bool], optional): Stages to apply plugin, length\n              should be same as 'num_stages'.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import Res2Net\n        >>> import torch\n        >>> self = Res2Net(depth=50, scales=4, base_width=26)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 256, 8, 8)\n        (1, 512, 4, 4)\n        (1, 1024, 2, 2)\n        (1, 2048, 1, 1)\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottle2neck, (3, 4, 6, 3)),\n        101: (Bottle2neck, (3, 4, 23, 3)),\n        152: (Bottle2neck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 scales=4,\n                 base_width=26,\n                 style='pytorch',\n                 deep_stem=True,\n                 avg_down=True,\n                 pretrained=None,\n                 init_cfg=None,\n                 **kwargs):\n        self.scales = scales\n        self.base_width = base_width\n        super(Res2Net, self).__init__(\n            style='pytorch',\n            deep_stem=True,\n            avg_down=True,\n            pretrained=pretrained,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def make_res_layer(self, **kwargs):\n        return Res2Layer(\n            scales=self.scales,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
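The Bottle2neck block above splits the output of its 1x1 conv1 into `scales` groups of `width` channels and chains 3x3 convs over them. The following is a minimal, self-contained sketch of that channel bookkeeping only (illustrative, not part of the repository; the values assume the defaults planes=64, base_width=26, base_channels=64, scales=4):

import math

import torch

# Width of each scale branch, as computed in Bottle2neck.__init__.
planes, base_width, base_channels, scales = 64, 26, 64, 4
width = int(math.floor(planes * (base_width / base_channels)))  # -> 26

# conv1 produces width * scales channels; torch.split breaks them into
# `scales` chunks of `width` channels each.
out = torch.randn(2, width * scales, 56, 56)
spx = torch.split(out, width, dim=1)
assert len(spx) == scales and all(s.shape[1] == width for s in spx)
# The first (scales - 1) chunks go through the 3x3 convs (with a running sum
# in 'normal' blocks); the last chunk is concatenated back unchanged, or
# average-pooled first in 'stage' blocks whose conv2_stride > 1.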
  {
    "path": "mmdet/models/backbones/resnest.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom ..layers import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNetV1d\n\n\nclass RSoftmax(nn.Module):\n    \"\"\"Radix Softmax module in ``SplitAttentionConv2d``.\n\n    Args:\n        radix (int): Radix of input.\n        groups (int): Groups of input.\n    \"\"\"\n\n    def __init__(self, radix, groups):\n        super().__init__()\n        self.radix = radix\n        self.groups = groups\n\n    def forward(self, x):\n        batch = x.size(0)\n        if self.radix > 1:\n            x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)\n            x = F.softmax(x, dim=1)\n            x = x.reshape(batch, -1)\n        else:\n            x = torch.sigmoid(x)\n        return x\n\n\nclass SplitAttentionConv2d(BaseModule):\n    \"\"\"Split-Attention Conv2d in ResNeSt.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        channels (int): Number of intermediate channels.\n        kernel_size (int | tuple[int]): Size of the convolution kernel.\n        stride (int | tuple[int]): Stride of the convolution.\n        padding (int | tuple[int]): Zero-padding added to both sides of\n        dilation (int | tuple[int]): Spacing between kernel elements.\n        groups (int): Number of blocked connections from input channels to\n            output channels.\n        groups (int): Same as nn.Conv2d.\n        radix (int): Radix of SpltAtConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels. Default: 4.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        dcn (dict): Config dict for DCN. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 channels,\n                 kernel_size,\n                 stride=1,\n                 padding=0,\n                 dilation=1,\n                 groups=1,\n                 radix=2,\n                 reduction_factor=4,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 init_cfg=None):\n        super(SplitAttentionConv2d, self).__init__(init_cfg)\n        inter_channels = max(in_channels * radix // reduction_factor, 32)\n        self.radix = radix\n        self.groups = groups\n        self.channels = channels\n        self.with_dcn = dcn is not None\n        self.dcn = dcn\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if self.with_dcn and not fallback_on_stride:\n            assert conv_cfg is None, 'conv_cfg must be None for DCN'\n            conv_cfg = dcn\n        self.conv = build_conv_layer(\n            conv_cfg,\n            in_channels,\n            channels * radix,\n            kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups * radix,\n            bias=False)\n        # To be consistent with original implementation, starting from 0\n        self.norm0_name, norm0 = build_norm_layer(\n            norm_cfg, channels * radix, postfix=0)\n        self.add_module(self.norm0_name, norm0)\n        self.relu = nn.ReLU(inplace=True)\n        self.fc1 = build_conv_layer(\n            None, channels, inter_channels, 1, groups=self.groups)\n        self.norm1_name, norm1 = build_norm_layer(\n            norm_cfg, inter_channels, postfix=1)\n        self.add_module(self.norm1_name, norm1)\n        self.fc2 = build_conv_layer(\n            None, inter_channels, channels * radix, 1, groups=self.groups)\n        self.rsoftmax = RSoftmax(radix, groups)\n\n    @property\n    def norm0(self):\n        \"\"\"nn.Module: the normalization layer named \"norm0\" \"\"\"\n        return getattr(self, self.norm0_name)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.norm0(x)\n        x = self.relu(x)\n\n        batch, rchannel = x.shape[:2]\n        batch = x.size(0)\n        if self.radix > 1:\n            splits = x.view(batch, self.radix, -1, *x.shape[2:])\n            gap = splits.sum(dim=1)\n        else:\n            gap = x\n        gap = F.adaptive_avg_pool2d(gap, 1)\n        gap = self.fc1(gap)\n\n        gap = self.norm1(gap)\n        gap = self.relu(gap)\n\n        atten = self.fc2(gap)\n        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)\n\n        if self.radix > 1:\n            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])\n            out = torch.sum(attens * splits, dim=1)\n        else:\n            out = atten * x\n        return out.contiguous()\n\n\nclass Bottleneck(_Bottleneck):\n    \"\"\"Bottleneck block for ResNeSt.\n\n    Args:\n        inplane (int): Input planes of this block.\n        planes (int): Middle planes of this block.\n        groups (int): Groups of conv2.\n        base_width (int): Base of width in terms of base 
channels. Default: 4.\n        base_channels (int): Base of channels for calculating width.\n            Default: 64.\n        radix (int): Radix of SpltAtConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels in\n            SplitAttentionConv2d. Default: 4.\n        avg_down_stride (bool): Whether to use average pool for stride in\n            Bottleneck. Default: True.\n        kwargs (dict): Key word arguments for base class.\n    \"\"\"\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 radix=2,\n                 reduction_factor=4,\n                 avg_down_stride=True,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeSt.\"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        self.with_modulated_dcn = False\n        self.conv2 = SplitAttentionConv2d(\n            width,\n            width,\n            kernel_size=3,\n            stride=1 if self.avg_down_stride else self.conv2_stride,\n            padding=self.dilation,\n            dilation=self.dilation,\n            groups=groups,\n            radix=radix,\n            reduction_factor=reduction_factor,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            dcn=self.dcn)\n        delattr(self, self.norm2_name)\n\n        if self.avg_down_stride:\n            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)\n\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n\n            if self.avg_down_stride:\n                out = self.avd_layer(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        
else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\n@MODELS.register_module()\nclass ResNeSt(ResNetV1d):\n    \"\"\"ResNeSt backbone.\n\n    Args:\n        groups (int): Number of groups of Bottleneck. Default: 1\n        base_width (int): Base width of Bottleneck. Default: 4\n        radix (int): Radix of SplitAttentionConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels in\n            SplitAttentionConv2d. Default: 4.\n        avg_down_stride (bool): Whether to use average pool for stride in\n            Bottleneck. Default: True.\n        kwargs (dict): Keyword arguments for ResNet.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3)),\n        200: (Bottleneck, (3, 24, 36, 3))\n    }\n\n    def __init__(self,\n                 groups=1,\n                 base_width=4,\n                 radix=2,\n                 reduction_factor=4,\n                 avg_down_stride=True,\n                 **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        self.radix = radix\n        self.reduction_factor = reduction_factor\n        self.avg_down_stride = avg_down_stride\n        super(ResNeSt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n        return ResLayer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            radix=self.radix,\n            reduction_factor=self.reduction_factor,\n            avg_down_stride=self.avg_down_stride,\n            **kwargs)\n"
  },
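SplitAttentionConv2d fuses its `radix` branches through the attention weights produced by RSoftmax, so its output keeps `channels` channels rather than `channels * radix`. Below is a small shape-check sketch under the assumption that mmdet and its mmcv/mmengine dependencies are installed; the layer sizes are illustrative:

import torch

from mmdet.models.backbones.resnest import SplitAttentionConv2d

conv = SplitAttentionConv2d(
    in_channels=64, channels=64, kernel_size=3, padding=1, radix=2, groups=1)
conv.eval()  # eval mode so the BN layers accept a single-sample input
x = torch.randn(1, 64, 32, 32)
out = conv(x)
# The radix branches are summed under the attention weights, not concatenated,
# so the channel count stays at `channels`.
assert out.shape == (1, 64, 32, 32)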
  {
    "path": "mmdet/models/backbones/resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer\nfrom mmengine.model import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.registry import MODELS\nfrom ..layers import ResLayer\n\n\nclass BasicBlock(BaseModule):\n    expansion = 1\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 stride=1,\n                 dilation=1,\n                 downsample=None,\n                 style='pytorch',\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 plugins=None,\n                 init_cfg=None):\n        super(BasicBlock, self).__init__(init_cfg)\n        assert dcn is None, 'Not implemented yet.'\n        assert plugins is None, 'Not implemented yet.'\n\n        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            3,\n            stride=stride,\n            padding=dilation,\n            dilation=dilation,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            conv_cfg, planes, planes, 3, padding=1, bias=False)\n        self.add_module(self.norm2_name, norm2)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.dilation = dilation\n        self.with_cp = with_cp\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(BaseModule):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 stride=1,\n                 dilation=1,\n                 downsample=None,\n                 style='pytorch',\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 plugins=None,\n                 init_cfg=None):\n        \"\"\"Bottleneck block for ResNet.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(init_cfg)\n        assert style 
in ['pytorch', 'caffe']\n        assert dcn is None or isinstance(dcn, dict)\n        assert plugins is None or isinstance(plugins, list)\n        if plugins is not None:\n            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']\n            assert all(p['position'] in allowed_position for p in plugins)\n\n        self.inplanes = inplanes\n        self.planes = planes\n        self.stride = stride\n        self.dilation = dilation\n        self.style = style\n        self.with_cp = with_cp\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.dcn = dcn\n        self.with_dcn = dcn is not None\n        self.plugins = plugins\n        self.with_plugins = plugins is not None\n\n        if self.with_plugins:\n            # collect plugins for conv1/conv2/conv3\n            self.after_conv1_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv1'\n            ]\n            self.after_conv2_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv2'\n            ]\n            self.after_conv3_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv3'\n            ]\n\n        if self.style == 'pytorch':\n            self.conv1_stride = 1\n            self.conv2_stride = stride\n        else:\n            self.conv1_stride = stride\n            self.conv2_stride = 1\n\n        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            norm_cfg, planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                conv_cfg,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=dilation,\n                dilation=dilation,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                dcn,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=dilation,\n                dilation=dilation,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            conv_cfg,\n            planes,\n            planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n\n        if self.with_plugins:\n            self.after_conv1_plugin_names = self.make_block_plugins(\n                planes, self.after_conv1_plugins)\n            self.after_conv2_plugin_names = self.make_block_plugins(\n                planes, self.after_conv2_plugins)\n            
self.after_conv3_plugin_names = self.make_block_plugins(\n                planes * self.expansion, self.after_conv3_plugins)\n\n    def make_block_plugins(self, in_channels, plugins):\n        \"\"\"make plugins for block.\n\n        Args:\n            in_channels (int): Input channels of plugin.\n            plugins (list[dict]): List of plugins cfg to build.\n\n        Returns:\n            list[str]: List of the names of plugin.\n        \"\"\"\n        assert isinstance(plugins, list)\n        plugin_names = []\n        for plugin in plugins:\n            plugin = plugin.copy()\n            name, layer = build_plugin_layer(\n                plugin,\n                in_channels=in_channels,\n                postfix=plugin.pop('postfix', ''))\n            assert not hasattr(self, name), f'duplicate plugin {name}'\n            self.add_module(name, layer)\n            plugin_names.append(name)\n        return plugin_names\n\n    def forward_plugin(self, x, plugin_names):\n        out = x\n        for name in plugin_names:\n            out = getattr(self, name)(out)\n        return out\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name)\n\n    @property\n    def norm3(self):\n        \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n        return getattr(self, self.norm3_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\n@MODELS.register_module()\nclass ResNet(BaseModule):\n    \"\"\"ResNet backbone.\n\n    Args:\n        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n        stem_channels (int | None): Number of stem channels. If not specified,\n            it will be the same as `base_channels`. Default: None.\n        base_channels (int): Number of base channels of res layer. Default: 64.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Resnet stages. Default: 4.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. 
If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        plugins (list[dict]): List of plugins for stages, each dict contains:\n\n            - cfg (dict, required): Cfg dict to build plugin.\n            - position (str, required): Position inside block to insert\n              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\n            - stages (tuple[bool], optional): Stages to apply plugin, length\n              should be same as 'num_stages'.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import ResNet\n        >>> import torch\n        >>> self = ResNet(depth=18)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     
print(tuple(level_out.shape))\n        (1, 64, 8, 8)\n        (1, 128, 4, 4)\n        (1, 256, 2, 2)\n        (1, 512, 1, 1)\n    \"\"\"\n\n    arch_settings = {\n        18: (BasicBlock, (2, 2, 2, 2)),\n        34: (BasicBlock, (3, 4, 6, 3)),\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 depth,\n                 in_channels=3,\n                 stem_channels=None,\n                 base_channels=64,\n                 num_stages=4,\n                 strides=(1, 2, 2, 2),\n                 dilations=(1, 1, 1, 1),\n                 out_indices=(0, 1, 2, 3),\n                 style='pytorch',\n                 deep_stem=False,\n                 avg_down=False,\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 dcn=None,\n                 stage_with_dcn=(False, False, False, False),\n                 plugins=None,\n                 with_cp=False,\n                 zero_init_residual=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResNet, self).__init__(init_cfg)\n        self.zero_init_residual = zero_init_residual\n        if depth not in self.arch_settings:\n            raise KeyError(f'invalid depth {depth} for resnet')\n\n        block_init_cfg = None\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n                block = self.arch_settings[depth][0]\n                if self.zero_init_residual:\n                    if block is BasicBlock:\n                        block_init_cfg = dict(\n                            type='Constant',\n                            val=0,\n                            override=dict(name='norm2'))\n                    elif block is Bottleneck:\n                        block_init_cfg = dict(\n                            type='Constant',\n                            val=0,\n                            override=dict(name='norm3'))\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.depth = depth\n        if stem_channels is None:\n            stem_channels = base_channels\n        self.stem_channels = stem_channels\n        self.base_channels = base_channels\n        self.num_stages = num_stages\n        assert num_stages >= 1 and num_stages <= 4\n        self.strides = strides\n        self.dilations = dilations\n        assert len(strides) == len(dilations) == num_stages\n        self.out_indices = out_indices\n        assert max(out_indices) < num_stages\n        self.style = style\n        self.deep_stem = deep_stem\n        self.avg_down = avg_down\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = 
norm_cfg\n        self.with_cp = with_cp\n        self.norm_eval = norm_eval\n        self.dcn = dcn\n        self.stage_with_dcn = stage_with_dcn\n        if dcn is not None:\n            assert len(stage_with_dcn) == num_stages\n        self.plugins = plugins\n        self.block, stage_blocks = self.arch_settings[depth]\n        self.stage_blocks = stage_blocks[:num_stages]\n        self.inplanes = stem_channels\n\n        self._make_stem_layer(in_channels, stem_channels)\n\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = strides[i]\n            dilation = dilations[i]\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            if plugins is not None:\n                stage_plugins = self.make_stage_plugins(plugins, i)\n            else:\n                stage_plugins = None\n            planes = base_channels * 2**i\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=planes,\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=with_cp,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                dcn=dcn,\n                plugins=stage_plugins,\n                init_cfg=block_init_cfg)\n            self.inplanes = planes * self.block.expansion\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n        self.feat_dim = self.block.expansion * base_channels * 2**(\n            len(self.stage_blocks) - 1)\n\n    def make_stage_plugins(self, plugins, stage_idx):\n        \"\"\"Make plugins for ResNet ``stage_idx`` th stage.\n\n        Currently we support to insert ``context_block``,\n        ``empirical_attention_block``, ``nonlocal_block`` into the backbone\n        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n        Bottleneck.\n\n        An example of plugins format could be:\n\n        Examples:\n            >>> plugins=[\n            ...     dict(cfg=dict(type='xxx', arg1='xxx'),\n            ...          stages=(False, True, True, True),\n            ...          position='after_conv2'),\n            ...     dict(cfg=dict(type='yyy'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3'),\n            ...     dict(cfg=dict(type='zzz', postfix='1'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3'),\n            ...     dict(cfg=dict(type='zzz', postfix='2'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3')\n            ... ]\n            >>> self = ResNet(depth=18)\n            >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n            >>> assert len(stage_plugins) == 3\n\n        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:\n\n        .. code-block:: none\n\n            conv1-> conv2->conv3->yyy->zzz1->zzz2\n\n        Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n\n        .. 
code-block:: none\n\n            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n        If stages is missing, the plugin would be applied to all stages.\n\n        Args:\n            plugins (list[dict]): List of plugins cfg to build. The postfix is\n                required if multiple same type plugins are inserted.\n            stage_idx (int): Index of stage to build\n\n        Returns:\n            list[dict]: Plugins for current stage\n        \"\"\"\n        stage_plugins = []\n        for plugin in plugins:\n            plugin = plugin.copy()\n            stages = plugin.pop('stages', None)\n            assert stages is None or len(stages) == self.num_stages\n            # whether to insert plugin into current stage\n            if stages is None or stages[stage_idx]:\n                stage_plugins.append(plugin)\n\n        return stage_plugins\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n        return ResLayer(**kwargs)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    def _make_stem_layer(self, in_channels, stem_channels):\n        if self.deep_stem:\n            self.stem = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    in_channels,\n                    stem_channels // 2,\n                    kernel_size=3,\n                    stride=2,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n                nn.ReLU(inplace=True),\n                build_conv_layer(\n                    self.conv_cfg,\n                    stem_channels // 2,\n                    stem_channels // 2,\n                    kernel_size=3,\n                    stride=1,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n                nn.ReLU(inplace=True),\n                build_conv_layer(\n                    self.conv_cfg,\n                    stem_channels // 2,\n                    stem_channels,\n                    kernel_size=3,\n                    stride=1,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels)[1],\n                nn.ReLU(inplace=True))\n        else:\n            self.conv1 = build_conv_layer(\n                self.conv_cfg,\n                in_channels,\n                stem_channels,\n                kernel_size=7,\n                stride=2,\n                padding=3,\n                bias=False)\n            self.norm1_name, norm1 = build_norm_layer(\n                self.norm_cfg, stem_channels, postfix=1)\n            self.add_module(self.norm1_name, norm1)\n            self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            if self.deep_stem:\n                self.stem.eval()\n                for param in self.stem.parameters():\n                    param.requires_grad = False\n            else:\n                self.norm1.eval()\n                for m in [self.conv1, self.norm1]:\n                    for param in m.parameters():\n                        param.requires_grad = False\n\n        for i in range(1, self.frozen_stages + 1):\n            m 
= getattr(self, f'layer{i}')\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        if self.deep_stem:\n            x = self.stem(x)\n        else:\n            x = self.conv1(x)\n            x = self.norm1(x)\n            x = self.relu(x)\n        x = self.maxpool(x)\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            x = res_layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keeping the normalization\n        layers frozen.\"\"\"\n        super(ResNet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval() only has an effect on BatchNorm layers\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n\n@MODELS.register_module()\nclass ResNetV1d(ResNet):\n    r\"\"\"ResNetV1d variant described in `Bag of Tricks\n    <https://arxiv.org/pdf/1812.01187.pdf>`_.\n\n    Compared with the default ResNet (ResNetV1b), ResNetV1d replaces the 7x7\n    conv in the input stem with three 3x3 convs. In the downsampling block, a\n    2x2 avg_pool with stride 2 is added before the conv, whose stride is\n    changed to 1.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(ResNetV1d, self).__init__(\n            deep_stem=True, avg_down=True, **kwargs)\n"
  },
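In mmdet configs the ResNet/ResNetV1d backbones above are typically built from a plain dict through the MODELS registry. A representative sketch (illustrative values, not taken from any specific config file in this repository):

# Typical backbone entry of a detector config; every key maps onto a ResNet
# __init__ argument defined above. MODELS.build(backbone) would instantiate it.
backbone = dict(
    type='ResNet',
    depth=50,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    norm_cfg=dict(type='BN', requires_grad=True),
    norm_eval=True,
    style='pytorch',
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'))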
  {
    "path": "mmdet/models/backbones/resnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom mmdet.registry import MODELS\nfrom ..layers import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeXt.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(\n            self.norm_cfg, width, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        self.with_modulated_dcn = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                self.conv_cfg,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                self.dcn,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        if self.with_plugins:\n            self._del_block_plugins(self.after_conv1_plugin_names +\n                                    self.after_conv2_plugin_names +\n                                    self.after_conv3_plugin_names)\n            self.after_conv1_plugin_names = self.make_block_plugins(\n                width, self.after_conv1_plugins)\n            self.after_conv2_plugin_names = self.make_block_plugins(\n                width, self.after_conv2_plugins)\n            self.after_conv3_plugin_names = self.make_block_plugins(\n                self.planes * self.expansion, self.after_conv3_plugins)\n\n    def _del_block_plugins(self, 
plugin_names):\n        \"\"\"delete plugins for block if exist.\n\n        Args:\n            plugin_names (list[str]): List of plugins name to delete.\n        \"\"\"\n        assert isinstance(plugin_names, list)\n        for plugin_name in plugin_names:\n            del self._modules[plugin_name]\n\n\n@MODELS.register_module()\nclass ResNeXt(ResNet):\n    \"\"\"ResNeXt backbone.\n\n    Args:\n        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Resnet stages. Default: 4.\n        groups (int): Group of resnext.\n        base_width (int): Base width of resnext.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n            not freezing any parameters.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self, groups=1, base_width=4, **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        super(ResNeXt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``\"\"\"\n        return ResLayer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
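The grouped 3x3 conv width in the ResNeXt Bottleneck above follows width = floor(planes * base_width / base_channels) * groups whenever groups > 1. A quick numeric check of that rule (illustrative; the common 32x4d setting is assumed, it is not defined in this file):

import math


def resnext_width(planes, groups=32, base_width=4, base_channels=64):
    # Mirrors the width computation in Bottleneck.__init__ above.
    if groups == 1:
        return planes
    return math.floor(planes * (base_width / base_channels)) * groups


# Per-stage widths of the grouped 3x3 conv for a ResNeXt-50 32x4d backbone.
assert [resnext_width(p) for p in (64, 128, 256, 512)] == [128, 256, 512, 1024]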
  {
    "path": "mmdet/models/backbones/ssd_vgg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import VGG\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom ..necks import ssd_neck\n\n\n@MODELS.register_module()\nclass SSDVGG(VGG, BaseModule):\n    \"\"\"VGG Backbone network for single-shot-detection.\n\n    Args:\n        depth (int): Depth of vgg, from {11, 13, 16, 19}.\n        with_last_pool (bool): Whether to add a pooling layer at the last\n            of the model\n        ceil_mode (bool): When True, will use `ceil` instead of `floor`\n            to compute the output shape.\n        out_indices (Sequence[int]): Output from which stages.\n        out_feature_indices (Sequence[int]): Output from which feature map.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n        input_size (int, optional): Deprecated argumment.\n            Width and height of input, from {300, 512}.\n        l2_norm_scale (float, optional) : Deprecated argumment.\n            L2 normalization layer init scale.\n\n    Example:\n        >>> self = SSDVGG(input_size=300, depth=11)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 300, 300)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 1024, 19, 19)\n        (1, 512, 10, 10)\n        (1, 256, 5, 5)\n        (1, 256, 3, 3)\n        (1, 256, 1, 1)\n    \"\"\"\n    extra_setting = {\n        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),\n        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),\n    }\n\n    def __init__(self,\n                 depth,\n                 with_last_pool=False,\n                 ceil_mode=True,\n                 out_indices=(3, 4),\n                 out_feature_indices=(22, 34),\n                 pretrained=None,\n                 init_cfg=None,\n                 input_size=None,\n                 l2_norm_scale=None):\n        # TODO: in_channels for mmcv.VGG\n        super(SSDVGG, self).__init__(\n            depth,\n            with_last_pool=with_last_pool,\n            ceil_mode=ceil_mode,\n            out_indices=out_indices)\n\n        self.features.add_module(\n            str(len(self.features)),\n            nn.MaxPool2d(kernel_size=3, stride=1, padding=1))\n        self.features.add_module(\n            str(len(self.features)),\n            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))\n        self.features.add_module(\n            str(len(self.features)), nn.ReLU(inplace=True))\n        self.features.add_module(\n            str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))\n        self.features.add_module(\n            str(len(self.features)), nn.ReLU(inplace=True))\n        self.out_feature_indices = out_feature_indices\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n\n        if init_cfg is not None:\n            self.init_cfg = init_cfg\n        elif isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = [\n                
dict(type='Kaiming', layer='Conv2d'),\n                dict(type='Constant', val=1, layer='BatchNorm2d'),\n                dict(type='Normal', std=0.01, layer='Linear'),\n            ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        if input_size is not None:\n            warnings.warn('DeprecationWarning: input_size is deprecated')\n        if l2_norm_scale is not None:\n            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '\n                          'deprecated, it has been moved to SSDNeck.')\n\n    def init_weights(self, pretrained=None):\n        super(VGG, self).init_weights()\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = []\n        for i, layer in enumerate(self.features):\n            x = layer(x)\n            if i in self.out_feature_indices:\n                outs.append(x)\n\n        if len(outs) == 1:\n            return outs[0]\n        else:\n            return tuple(outs)\n\n\nclass L2Norm(ssd_neck.L2Norm):\n\n    def __init__(self, **kwargs):\n        super(L2Norm, self).__init__(**kwargs)\n        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '\n                      'is deprecated, please use L2Norm in '\n                      'mmdet/models/necks/ssd_neck.py instead')\n"
  },
  {
    "path": "mmdet/models/backbones/swin.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN, build_dropout\nfrom mmengine.logging import MMLogger\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.model.weight_init import (constant_init, trunc_normal_,\n                                        trunc_normal_init)\nfrom mmengine.runner.checkpoint import CheckpointLoader\nfrom mmengine.utils import to_2tuple\n\nfrom mmdet.registry import MODELS\nfrom ..layers import PatchEmbed, PatchMerging\n\n\nclass WindowMSA(BaseModule):\n    \"\"\"Window based multi-head self-attention (W-MSA) module with relative\n    position bias.\n\n    Args:\n        embed_dims (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (tuple[int]): The height and width of the window.\n        qkv_bias (bool, optional):  If True, add a learnable bias to q, k, v.\n            Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        attn_drop_rate (float, optional): Dropout ratio of attention weight.\n            Default: 0.0\n        proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.\n        init_cfg (dict | None, optional): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 window_size,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 attn_drop_rate=0.,\n                 proj_drop_rate=0.,\n                 init_cfg=None):\n\n        super().__init__()\n        self.embed_dims = embed_dims\n        self.window_size = window_size  # Wh, Ww\n        self.num_heads = num_heads\n        head_embed_dims = embed_dims // num_heads\n        self.scale = qk_scale or head_embed_dims**-0.5\n        self.init_cfg = init_cfg\n\n        # define a parameter table of relative position bias\n        self.relative_position_bias_table = nn.Parameter(\n            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),\n                        num_heads))  # 2*Wh-1 * 2*Ww-1, nH\n\n        # About 2x faster than original impl\n        Wh, Ww = self.window_size\n        rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)\n        rel_position_index = rel_index_coords + rel_index_coords.T\n        rel_position_index = rel_position_index.flip(1).contiguous()\n        self.register_buffer('relative_position_index', rel_position_index)\n\n        self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop_rate)\n        self.proj = nn.Linear(embed_dims, embed_dims)\n        self.proj_drop = nn.Dropout(proj_drop_rate)\n\n        self.softmax = nn.Softmax(dim=-1)\n\n    def init_weights(self):\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def forward(self, x, mask=None):\n        \"\"\"\n        Args:\n\n            x (tensor): input features with shape of (num_windows*B, N, C)\n            mask (tensor | None, Optional): mask with shape of (num_windows,\n                Wh*Ww, Wh*Ww), value should be between (-inf, 0].\n        \"\"\"\n        B, N, C = x.shape\n        qkv = 
self.qkv(x).reshape(B, N, 3, self.num_heads,\n                                  C // self.num_heads).permute(2, 0, 3, 1, 4)\n        # make torchscript happy (cannot use tensor as tuple)\n        q, k, v = qkv[0], qkv[1], qkv[2]\n\n        q = q * self.scale\n        attn = (q @ k.transpose(-2, -1))\n\n        relative_position_bias = self.relative_position_bias_table[\n            self.relative_position_index.view(-1)].view(\n                self.window_size[0] * self.window_size[1],\n                self.window_size[0] * self.window_size[1],\n                -1)  # Wh*Ww,Wh*Ww,nH\n        relative_position_bias = relative_position_bias.permute(\n            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww\n        attn = attn + relative_position_bias.unsqueeze(0)\n\n        if mask is not None:\n            nW = mask.shape[0]\n            attn = attn.view(B // nW, nW, self.num_heads, N,\n                             N) + mask.unsqueeze(1).unsqueeze(0)\n            attn = attn.view(-1, self.num_heads, N, N)\n        attn = self.softmax(attn)\n\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n        return x\n\n    @staticmethod\n    def double_step_seq(step1, len1, step2, len2):\n        seq1 = torch.arange(0, step1 * len1, step1)\n        seq2 = torch.arange(0, step2 * len2, step2)\n        return (seq1[:, None] + seq2[None, :]).reshape(1, -1)\n\n\nclass ShiftWindowMSA(BaseModule):\n    \"\"\"Shifted Window Multihead Self-Attention Module.\n\n    Args:\n        embed_dims (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (int): The height and width of the window.\n        shift_size (int, optional): The shift step of each window towards\n            right-bottom. If zero, act as regular window-msa. Defaults to 0.\n        qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n            Default: True\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. 
Defaults: None.\n        attn_drop_rate (float, optional): Dropout ratio of attention weight.\n            Defaults: 0.\n        proj_drop_rate (float, optional): Dropout ratio of output.\n            Defaults: 0.\n        dropout_layer (dict, optional): The dropout_layer used before output.\n            Defaults: dict(type='DropPath', drop_prob=0.).\n        init_cfg (dict, optional): The extra config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 window_size,\n                 shift_size=0,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 attn_drop_rate=0,\n                 proj_drop_rate=0,\n                 dropout_layer=dict(type='DropPath', drop_prob=0.),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n\n        self.window_size = window_size\n        self.shift_size = shift_size\n        assert 0 <= self.shift_size < self.window_size\n\n        self.w_msa = WindowMSA(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            window_size=to_2tuple(window_size),\n            qkv_bias=qkv_bias,\n            qk_scale=qk_scale,\n            attn_drop_rate=attn_drop_rate,\n            proj_drop_rate=proj_drop_rate,\n            init_cfg=None)\n\n        self.drop = build_dropout(dropout_layer)\n\n    def forward(self, query, hw_shape):\n        B, L, C = query.shape\n        H, W = hw_shape\n        assert L == H * W, 'input feature has wrong size'\n        query = query.view(B, H, W, C)\n\n        # pad feature maps to multiples of window size\n        pad_r = (self.window_size - W % self.window_size) % self.window_size\n        pad_b = (self.window_size - H % self.window_size) % self.window_size\n        query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))\n        H_pad, W_pad = query.shape[1], query.shape[2]\n\n        # cyclic shift\n        if self.shift_size > 0:\n            shifted_query = torch.roll(\n                query,\n                shifts=(-self.shift_size, -self.shift_size),\n                dims=(1, 2))\n\n            # calculate attention mask for SW-MSA\n            img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)\n            h_slices = (slice(0, -self.window_size),\n                        slice(-self.window_size,\n                              -self.shift_size), slice(-self.shift_size, None))\n            w_slices = (slice(0, -self.window_size),\n                        slice(-self.window_size,\n                              -self.shift_size), slice(-self.shift_size, None))\n            cnt = 0\n            for h in h_slices:\n                for w in w_slices:\n                    img_mask[:, h, w, :] = cnt\n                    cnt += 1\n\n            # nW, window_size, window_size, 1\n            mask_windows = self.window_partition(img_mask)\n            mask_windows = mask_windows.view(\n                -1, self.window_size * self.window_size)\n            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n            attn_mask = attn_mask.masked_fill(attn_mask != 0,\n                                              float(-100.0)).masked_fill(\n                                                  attn_mask == 0, float(0.0))\n        else:\n            shifted_query = query\n            attn_mask = None\n\n        # nW*B, window_size, window_size, C\n        query_windows = self.window_partition(shifted_query)\n        # nW*B, 
window_size*window_size, C\n        query_windows = query_windows.view(-1, self.window_size**2, C)\n\n        # W-MSA/SW-MSA (nW*B, window_size*window_size, C)\n        attn_windows = self.w_msa(query_windows, mask=attn_mask)\n\n        # merge windows\n        attn_windows = attn_windows.view(-1, self.window_size,\n                                         self.window_size, C)\n\n        # B H' W' C\n        shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)\n        # reverse cyclic shift\n        if self.shift_size > 0:\n            x = torch.roll(\n                shifted_x,\n                shifts=(self.shift_size, self.shift_size),\n                dims=(1, 2))\n        else:\n            x = shifted_x\n\n        if pad_r > 0 or pad_b:\n            x = x[:, :H, :W, :].contiguous()\n\n        x = x.view(B, H * W, C)\n\n        x = self.drop(x)\n        return x\n\n    def window_reverse(self, windows, H, W):\n        \"\"\"\n        Args:\n            windows: (num_windows*B, window_size, window_size, C)\n            H (int): Height of image\n            W (int): Width of image\n        Returns:\n            x: (B, H, W, C)\n        \"\"\"\n        window_size = self.window_size\n        B = int(windows.shape[0] / (H * W / window_size / window_size))\n        x = windows.view(B, H // window_size, W // window_size, window_size,\n                         window_size, -1)\n        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n        return x\n\n    def window_partition(self, x):\n        \"\"\"\n        Args:\n            x: (B, H, W, C)\n        Returns:\n            windows: (num_windows*B, window_size, window_size, C)\n        \"\"\"\n        B, H, W, C = x.shape\n        window_size = self.window_size\n        x = x.view(B, H // window_size, window_size, W // window_size,\n                   window_size, C)\n        windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()\n        windows = windows.view(-1, window_size, window_size, C)\n        return windows\n\n\nclass SwinBlock(BaseModule):\n    \"\"\"\"\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        window_size (int, optional): The local window scale. Default: 7.\n        shift (bool, optional): whether to shift window or not. Default False.\n        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        drop_rate (float, optional): Dropout rate. Default: 0.\n        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n        drop_path_rate (float, optional): Stochastic depth rate. Default: 0.\n        act_cfg (dict, optional): The config dict of activation function.\n            Default: dict(type='GELU').\n        norm_cfg (dict, optional): The config dict of normalization.\n            Default: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. 
Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        init_cfg (dict | list | None, optional): The init config.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 window_size=7,\n                 shift=False,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 init_cfg=None):\n\n        super(SwinBlock, self).__init__()\n\n        self.init_cfg = init_cfg\n        self.with_cp = with_cp\n\n        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]\n        self.attn = ShiftWindowMSA(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            window_size=window_size,\n            shift_size=window_size // 2 if shift else 0,\n            qkv_bias=qkv_bias,\n            qk_scale=qk_scale,\n            attn_drop_rate=attn_drop_rate,\n            proj_drop_rate=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            init_cfg=None)\n\n        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]\n        self.ffn = FFN(\n            embed_dims=embed_dims,\n            feedforward_channels=feedforward_channels,\n            num_fcs=2,\n            ffn_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            act_cfg=act_cfg,\n            add_identity=True,\n            init_cfg=None)\n\n    def forward(self, x, hw_shape):\n\n        def _inner_forward(x):\n            identity = x\n            x = self.norm1(x)\n            x = self.attn(x, hw_shape)\n\n            x = x + identity\n\n            identity = x\n            x = self.norm2(x)\n            x = self.ffn(x, identity=identity)\n\n            return x\n\n        if self.with_cp and x.requires_grad:\n            x = cp.checkpoint(_inner_forward, x)\n        else:\n            x = _inner_forward(x)\n\n        return x\n\n\nclass SwinBlockSequence(BaseModule):\n    \"\"\"Implements one stage in Swin Transformer.\n\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        depth (int): The number of blocks in this stage.\n        window_size (int, optional): The local window scale. Default: 7.\n        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        drop_rate (float, optional): Dropout rate. Default: 0.\n        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n        drop_path_rate (float | list[float], optional): Stochastic depth\n            rate. Default: 0.\n        downsample (BaseModule | None, optional): The downsample operation\n            module. Default: None.\n        act_cfg (dict, optional): The config dict of activation function.\n            Default: dict(type='GELU').\n        norm_cfg (dict, optional): The config dict of normalization.\n            Default: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. 
Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        init_cfg (dict | list | None, optional): The init config.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 depth,\n                 window_size=7,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 downsample=None,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        if isinstance(drop_path_rate, list):\n            drop_path_rates = drop_path_rate\n            assert len(drop_path_rates) == depth\n        else:\n            drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]\n\n        self.blocks = ModuleList()\n        for i in range(depth):\n            block = SwinBlock(\n                embed_dims=embed_dims,\n                num_heads=num_heads,\n                feedforward_channels=feedforward_channels,\n                window_size=window_size,\n                shift=False if i % 2 == 0 else True,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop_rate=drop_rate,\n                attn_drop_rate=attn_drop_rate,\n                drop_path_rate=drop_path_rates[i],\n                act_cfg=act_cfg,\n                norm_cfg=norm_cfg,\n                with_cp=with_cp,\n                init_cfg=None)\n            self.blocks.append(block)\n\n        self.downsample = downsample\n\n    def forward(self, x, hw_shape):\n        for block in self.blocks:\n            x = block(x, hw_shape)\n\n        if self.downsample:\n            x_down, down_hw_shape = self.downsample(x, hw_shape)\n            return x_down, down_hw_shape, x, hw_shape\n        else:\n            return x, hw_shape, x, hw_shape\n\n\n@MODELS.register_module()\nclass SwinTransformer(BaseModule):\n    \"\"\" Swin Transformer\n    A PyTorch implement of : `Swin Transformer:\n    Hierarchical Vision Transformer using Shifted Windows`  -\n        https://arxiv.org/abs/2103.14030\n\n    Inspiration from\n    https://github.com/microsoft/Swin-Transformer\n\n    Args:\n        pretrain_img_size (int | tuple[int]): The size of input image when\n            pretrain. Defaults: 224.\n        in_channels (int): The num of input channels.\n            Defaults: 3.\n        embed_dims (int): The feature dimension. Default: 96.\n        patch_size (int | tuple[int]): Patch size. Default: 4.\n        window_size (int): Window size. Default: 7.\n        mlp_ratio (int): Ratio of mlp hidden dim to embedding dim.\n            Default: 4.\n        depths (tuple[int]): Depths of each Swin Transformer stage.\n            Default: (2, 2, 6, 2).\n        num_heads (tuple[int]): Parallel attention heads of each Swin\n            Transformer stage. Default: (3, 6, 12, 24).\n        strides (tuple[int]): The patch merging or patch embedding stride of\n            each Swin Transformer stage. (In swin, we set kernel size equal to\n            stride.) 
Default: (4, 2, 2, 2).\n        out_indices (tuple[int]): Output from which stages.\n            Default: (0, 1, 2, 3).\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key,\n            value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        patch_norm (bool): If add a norm layer for patch embed and patch\n            merging. Default: True.\n        drop_rate (float): Dropout rate. Defaults: 0.\n        attn_drop_rate (float): Attention dropout rate. Default: 0.\n        drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.\n        use_abs_pos_embed (bool): If True, add absolute position embedding to\n            the patch embedding. Defaults: False.\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer at\n            output of backone. Defaults: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        pretrained (str, optional): model pretrained path. Default: None.\n        convert_weights (bool): The flag indicates whether the\n            pre-trained model is from the original repo. We may need\n            to convert some keys to make it compatible.\n            Default: False.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            Default: -1 (-1 means not freezing any parameters).\n        init_cfg (dict, optional): The Config for initialization.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=224,\n                 in_channels=3,\n                 embed_dims=96,\n                 patch_size=4,\n                 window_size=7,\n                 mlp_ratio=4,\n                 depths=(2, 2, 6, 2),\n                 num_heads=(3, 6, 12, 24),\n                 strides=(4, 2, 2, 2),\n                 out_indices=(0, 1, 2, 3),\n                 qkv_bias=True,\n                 qk_scale=None,\n                 patch_norm=True,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.1,\n                 use_abs_pos_embed=False,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 pretrained=None,\n                 convert_weights=False,\n                 frozen_stages=-1,\n                 init_cfg=None):\n        self.convert_weights = convert_weights\n        self.frozen_stages = frozen_stages\n        if isinstance(pretrain_img_size, int):\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n        elif isinstance(pretrain_img_size, tuple):\n            if len(pretrain_img_size) == 1:\n                pretrain_img_size = to_2tuple(pretrain_img_size[0])\n            assert len(pretrain_img_size) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pretrain_img_size)}'\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = 
dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = init_cfg\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        super(SwinTransformer, self).__init__(init_cfg=init_cfg)\n\n        num_layers = len(depths)\n        self.out_indices = out_indices\n        self.use_abs_pos_embed = use_abs_pos_embed\n\n        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'\n\n        self.patch_embed = PatchEmbed(\n            in_channels=in_channels,\n            embed_dims=embed_dims,\n            conv_type='Conv2d',\n            kernel_size=patch_size,\n            stride=strides[0],\n            norm_cfg=norm_cfg if patch_norm else None,\n            init_cfg=None)\n\n        if self.use_abs_pos_embed:\n            patch_row = pretrain_img_size[0] // patch_size\n            patch_col = pretrain_img_size[1] // patch_size\n            num_patches = patch_row * patch_col\n            self.absolute_pos_embed = nn.Parameter(\n                torch.zeros((1, num_patches, embed_dims)))\n\n        self.drop_after_pos = nn.Dropout(p=drop_rate)\n\n        # set stochastic depth decay rule\n        total_depth = sum(depths)\n        dpr = [\n            x.item() for x in torch.linspace(0, drop_path_rate, total_depth)\n        ]\n\n        self.stages = ModuleList()\n        in_channels = embed_dims\n        for i in range(num_layers):\n            if i < num_layers - 1:\n                downsample = PatchMerging(\n                    in_channels=in_channels,\n                    out_channels=2 * in_channels,\n                    stride=strides[i + 1],\n                    norm_cfg=norm_cfg if patch_norm else None,\n                    init_cfg=None)\n            else:\n                downsample = None\n\n            stage = SwinBlockSequence(\n                embed_dims=in_channels,\n                num_heads=num_heads[i],\n                feedforward_channels=mlp_ratio * in_channels,\n                depth=depths[i],\n                window_size=window_size,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop_rate=drop_rate,\n                attn_drop_rate=attn_drop_rate,\n                drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],\n                downsample=downsample,\n                act_cfg=act_cfg,\n                norm_cfg=norm_cfg,\n                with_cp=with_cp,\n                init_cfg=None)\n            self.stages.append(stage)\n            if downsample:\n                in_channels = downsample.out_channels\n\n        self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]\n        # Add a norm layer for each output\n        for i in out_indices:\n            layer = build_norm_layer(norm_cfg, self.num_features[i])[1]\n            layer_name = f'norm{i}'\n            self.add_module(layer_name, layer)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n        super(SwinTransformer, self).train(mode)\n        self._freeze_stages()\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            self.patch_embed.eval()\n            for param in self.patch_embed.parameters():\n                param.requires_grad = False\n            if self.use_abs_pos_embed:\n                self.absolute_pos_embed.requires_grad = False\n            self.drop_after_pos.eval()\n\n        for i in range(1, self.frozen_stages + 1):\n\n            
if (i - 1) in self.out_indices:\n                norm_layer = getattr(self, f'norm{i-1}')\n                norm_layer.eval()\n                for param in norm_layer.parameters():\n                    param.requires_grad = False\n\n            m = self.stages[i - 1]\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def init_weights(self):\n        logger = MMLogger.get_current_instance()\n        if self.init_cfg is None:\n            logger.warn(f'No pre-trained weights for '\n                        f'{self.__class__.__name__}, '\n                        f'training start from scratch')\n            if self.use_abs_pos_embed:\n                trunc_normal_(self.absolute_pos_embed, std=0.02)\n            for m in self.modules():\n                if isinstance(m, nn.Linear):\n                    trunc_normal_init(m, std=.02, bias=0.)\n                elif isinstance(m, nn.LayerNorm):\n                    constant_init(m, 1.0)\n        else:\n            assert 'checkpoint' in self.init_cfg, f'Only support ' \\\n                                                  f'specify `Pretrained` in ' \\\n                                                  f'`init_cfg` in ' \\\n                                                  f'{self.__class__.__name__} '\n            ckpt = CheckpointLoader.load_checkpoint(\n                self.init_cfg.checkpoint, logger=logger, map_location='cpu')\n            if 'state_dict' in ckpt:\n                _state_dict = ckpt['state_dict']\n            elif 'model' in ckpt:\n                _state_dict = ckpt['model']\n            else:\n                _state_dict = ckpt\n            if self.convert_weights:\n                # supported loading weight from original repo,\n                _state_dict = swin_converter(_state_dict)\n\n            state_dict = OrderedDict()\n            for k, v in _state_dict.items():\n                if k.startswith('backbone.'):\n                    state_dict[k[9:]] = v\n\n            # strip prefix of state_dict\n            if list(state_dict.keys())[0].startswith('module.'):\n                state_dict = {k[7:]: v for k, v in state_dict.items()}\n\n            # reshape absolute position embedding\n            if state_dict.get('absolute_pos_embed') is not None:\n                absolute_pos_embed = state_dict['absolute_pos_embed']\n                N1, L, C1 = absolute_pos_embed.size()\n                N2, C2, H, W = self.absolute_pos_embed.size()\n                if N1 != N2 or C1 != C2 or L != H * W:\n                    logger.warning('Error in loading absolute_pos_embed, pass')\n                else:\n                    state_dict['absolute_pos_embed'] = absolute_pos_embed.view(\n                        N2, H, W, C2).permute(0, 3, 1, 2).contiguous()\n\n            # interpolate position bias table if needed\n            relative_position_bias_table_keys = [\n                k for k in state_dict.keys()\n                if 'relative_position_bias_table' in k\n            ]\n            for table_key in relative_position_bias_table_keys:\n                table_pretrained = state_dict[table_key]\n                table_current = self.state_dict()[table_key]\n                L1, nH1 = table_pretrained.size()\n                L2, nH2 = table_current.size()\n                if nH1 != nH2:\n                    logger.warning(f'Error in loading {table_key}, pass')\n                elif L1 != L2:\n                    S1 = int(L1**0.5)\n                    S2 = int(L2**0.5)\n   
                 table_pretrained_resized = F.interpolate(\n                        table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),\n                        size=(S2, S2),\n                        mode='bicubic')\n                    state_dict[table_key] = table_pretrained_resized.view(\n                        nH2, L2).permute(1, 0).contiguous()\n\n            # load state_dict\n            self.load_state_dict(state_dict, False)\n\n    def forward(self, x):\n        x, hw_shape = self.patch_embed(x)\n\n        if self.use_abs_pos_embed:\n            x = x + self.absolute_pos_embed\n        x = self.drop_after_pos(x)\n\n        outs = []\n        for i, stage in enumerate(self.stages):\n            x, hw_shape, out, out_hw_shape = stage(x, hw_shape)\n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                out = norm_layer(out)\n                out = out.view(-1, *out_hw_shape,\n                               self.num_features[i]).permute(0, 3, 1,\n                                                             2).contiguous()\n                outs.append(out)\n\n        return outs\n\n\ndef swin_converter(ckpt):\n\n    new_ckpt = OrderedDict()\n\n    def correct_unfold_reduction_order(x):\n        out_channel, in_channel = x.shape\n        x = x.reshape(out_channel, 4, in_channel // 4)\n        x = x[:, [0, 2, 1, 3], :].transpose(1,\n                                            2).reshape(out_channel, in_channel)\n        return x\n\n    def correct_unfold_norm_order(x):\n        in_channel = x.shape[0]\n        x = x.reshape(4, in_channel // 4)\n        x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)\n        return x\n\n    for k, v in ckpt.items():\n        if k.startswith('head'):\n            continue\n        elif k.startswith('layers'):\n            new_v = v\n            if 'attn.' in k:\n                new_k = k.replace('attn.', 'attn.w_msa.')\n            elif 'mlp.' in k:\n                if 'mlp.fc1.' in k:\n                    new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')\n                elif 'mlp.fc2.' in k:\n                    new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')\n                else:\n                    new_k = k.replace('mlp.', 'ffn.')\n            elif 'downsample' in k:\n                new_k = k\n                if 'reduction.' in k:\n                    new_v = correct_unfold_reduction_order(v)\n                elif 'norm.' in k:\n                    new_v = correct_unfold_norm_order(v)\n            else:\n                new_k = k\n            new_k = new_k.replace('layers', 'stages', 1)\n        elif k.startswith('patch_embed'):\n            new_v = v\n            if 'proj' in k:\n                new_k = k.replace('proj', 'projection')\n            else:\n                new_k = k\n        else:\n            new_v = v\n            new_k = k\n\n        new_ckpt['backbone.' + new_k] = new_v\n\n    return new_ckpt\n"
  },
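  {
    "path": "examples/hypothetical_swin_backbone_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not a file from the upstream repository.\n# It only illustrates how the SwinTransformer backbone defined in\n# mmdet/models/backbones/swin.py might be built through the MODELS registry\n# and what it returns for the default Swin-T settings.\nimport torch\n\nfrom mmdet.registry import MODELS\n\nswin_cfg = dict(\n    type='SwinTransformer',\n    embed_dims=96,\n    depths=(2, 2, 6, 2),\n    num_heads=(3, 6, 12, 24),\n    window_size=7,\n    out_indices=(0, 1, 2, 3))\nbackbone = MODELS.build(swin_cfg)\nbackbone.eval()\n\nwith torch.no_grad():\n    outs = backbone(torch.randn(1, 3, 224, 224))\n\n# Four feature maps at strides 4/8/16/32 with 96/192/384/768 channels:\n# (1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7).\nfor out in outs:\n    print(out.shape)\n"
  },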
  {
    "path": "mmdet/models/backbones/trident_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.model import BaseModule\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.backbones.resnet import Bottleneck, ResNet\nfrom mmdet.registry import MODELS\n\n\nclass TridentConv(BaseModule):\n    \"\"\"Trident Convolution Module.\n\n    Args:\n        in_channels (int): Number of channels in input.\n        out_channels (int): Number of channels in output.\n        kernel_size (int): Size of convolution kernel.\n        stride (int, optional): Convolution stride. Default: 1.\n        trident_dilations (tuple[int, int, int], optional): Dilations of\n            different trident branch. Default: (1, 2, 3).\n        test_branch_idx (int, optional): In inference, all 3 branches will\n            be used if `test_branch_idx==-1`, otherwise only branch with\n            index `test_branch_idx` will be used. Default: 1.\n        bias (bool, optional): Whether to use bias in convolution or not.\n            Default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride=1,\n                 trident_dilations=(1, 2, 3),\n                 test_branch_idx=1,\n                 bias=False,\n                 init_cfg=None):\n        super(TridentConv, self).__init__(init_cfg)\n        self.num_branch = len(trident_dilations)\n        self.with_bias = bias\n        self.test_branch_idx = test_branch_idx\n        self.stride = _pair(stride)\n        self.kernel_size = _pair(kernel_size)\n        self.paddings = _pair(trident_dilations)\n        self.dilations = trident_dilations\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.bias = bias\n\n        self.weight = nn.Parameter(\n            torch.Tensor(out_channels, in_channels, *self.kernel_size))\n        if bias:\n            self.bias = nn.Parameter(torch.Tensor(out_channels))\n        else:\n            self.bias = None\n\n    def extra_repr(self):\n        tmpstr = f'in_channels={self.in_channels}'\n        tmpstr += f', out_channels={self.out_channels}'\n        tmpstr += f', kernel_size={self.kernel_size}'\n        tmpstr += f', num_branch={self.num_branch}'\n        tmpstr += f', test_branch_idx={self.test_branch_idx}'\n        tmpstr += f', stride={self.stride}'\n        tmpstr += f', paddings={self.paddings}'\n        tmpstr += f', dilations={self.dilations}'\n        tmpstr += f', bias={self.bias}'\n        return tmpstr\n\n    def forward(self, inputs):\n        if self.training or self.test_branch_idx == -1:\n            outputs = [\n                F.conv2d(input, self.weight, self.bias, self.stride, padding,\n                         dilation) for input, dilation, padding in zip(\n                             inputs, self.dilations, self.paddings)\n            ]\n        else:\n            assert len(inputs) == 1\n            outputs = [\n                F.conv2d(inputs[0], self.weight, self.bias, self.stride,\n                         self.paddings[self.test_branch_idx],\n                         self.dilations[self.test_branch_idx])\n            ]\n\n        return outputs\n\n\n# Since TridentNet is defined over ResNet50 and 
ResNet101, here we\n# only support TridentBottleneckBlock.\nclass TridentBottleneck(Bottleneck):\n    \"\"\"BottleBlock for TridentResNet.\n\n    Args:\n        trident_dilations (tuple[int, int, int]): Dilations of different\n            trident branch.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n        concat_output (bool): Whether to concat the output list to a Tensor.\n            `True` only in the last Block.\n    \"\"\"\n\n    def __init__(self, trident_dilations, test_branch_idx, concat_output,\n                 **kwargs):\n\n        super(TridentBottleneck, self).__init__(**kwargs)\n        self.trident_dilations = trident_dilations\n        self.num_branch = len(trident_dilations)\n        self.concat_output = concat_output\n        self.test_branch_idx = test_branch_idx\n        self.conv2 = TridentConv(\n            self.planes,\n            self.planes,\n            kernel_size=3,\n            stride=self.conv2_stride,\n            bias=False,\n            trident_dilations=self.trident_dilations,\n            test_branch_idx=test_branch_idx,\n            init_cfg=dict(\n                type='Kaiming',\n                distribution='uniform',\n                mode='fan_in',\n                override=dict(name='conv2')))\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            num_branch = (\n                self.num_branch\n                if self.training or self.test_branch_idx == -1 else 1)\n            identity = x\n            if not isinstance(x, list):\n                x = (x, ) * num_branch\n                identity = x\n                if self.downsample is not None:\n                    identity = [self.downsample(b) for b in x]\n\n            out = [self.conv1(b) for b in x]\n            out = [self.norm1(b) for b in out]\n            out = [self.relu(b) for b in out]\n\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = [self.norm2(b) for b in out]\n            out = [self.relu(b) for b in out]\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv2_plugin_names)\n\n            out = [self.conv3(b) for b in out]\n            out = [self.norm3(b) for b in out]\n\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv3_plugin_names)\n\n            out = [\n                out_b + identity_b for out_b, identity_b in zip(out, identity)\n            ]\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = [self.relu(b) for b in out]\n        if self.concat_output:\n            out = torch.cat(out, dim=0)\n        return out\n\n\ndef make_trident_res_layer(block,\n                           inplanes,\n                           planes,\n                           num_blocks,\n                           stride=1,\n                           trident_dilations=(1, 2, 
3),\n                           style='pytorch',\n                           with_cp=False,\n                           conv_cfg=None,\n                           norm_cfg=dict(type='BN'),\n                           dcn=None,\n                           plugins=None,\n                           test_branch_idx=-1):\n    \"\"\"Build Trident Res Layers.\"\"\"\n\n    downsample = None\n    if stride != 1 or inplanes != planes * block.expansion:\n        downsample = []\n        conv_stride = stride\n        downsample.extend([\n            build_conv_layer(\n                conv_cfg,\n                inplanes,\n                planes * block.expansion,\n                kernel_size=1,\n                stride=conv_stride,\n                bias=False),\n            build_norm_layer(norm_cfg, planes * block.expansion)[1]\n        ])\n        downsample = nn.Sequential(*downsample)\n\n    layers = []\n    for i in range(num_blocks):\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride if i == 0 else 1,\n                trident_dilations=trident_dilations,\n                downsample=downsample if i == 0 else None,\n                style=style,\n                with_cp=with_cp,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                dcn=dcn,\n                plugins=plugins,\n                test_branch_idx=test_branch_idx,\n                concat_output=True if i == num_blocks - 1 else False))\n        inplanes = planes * block.expansion\n    return nn.Sequential(*layers)\n\n\n@MODELS.register_module()\nclass TridentResNet(ResNet):\n    \"\"\"The stem layer, stage 1 and stage 2 in Trident ResNet are identical to\n    ResNet, while in stage 3, Trident BottleBlock is utilized to replace the\n    normal BottleBlock to yield trident output. 
Different branch shares the\n    convolution weight but uses different dilations to achieve multi-scale\n    output.\n\n                               / stage3(b0) \\\n    x - stem - stage1 - stage2 - stage3(b1) - output\n                               \\ stage3(b2) /\n\n    Args:\n        depth (int): Depth of resnet, from {50, 101, 152}.\n        num_branch (int): Number of branches in TridentNet.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n        trident_dilations (tuple[int]): Dilations of different trident branch.\n            len(trident_dilations) should be equal to num_branch.\n    \"\"\"  # noqa\n\n    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,\n                 **kwargs):\n\n        assert num_branch == len(trident_dilations)\n        assert depth in (50, 101, 152)\n        super(TridentResNet, self).__init__(depth, **kwargs)\n        assert self.num_stages == 3\n        self.test_branch_idx = test_branch_idx\n        self.num_branch = num_branch\n\n        last_stage_idx = self.num_stages - 1\n        stride = self.strides[last_stage_idx]\n        dilation = trident_dilations\n        dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None\n        if self.plugins is not None:\n            stage_plugins = self.make_stage_plugins(self.plugins,\n                                                    last_stage_idx)\n        else:\n            stage_plugins = None\n        planes = self.base_channels * 2**last_stage_idx\n        res_layer = make_trident_res_layer(\n            TridentBottleneck,\n            inplanes=(self.block.expansion * self.base_channels *\n                      2**(last_stage_idx - 1)),\n            planes=planes,\n            num_blocks=self.stage_blocks[last_stage_idx],\n            stride=stride,\n            trident_dilations=dilation,\n            style=self.style,\n            with_cp=self.with_cp,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            dcn=dcn,\n            plugins=stage_plugins,\n            test_branch_idx=self.test_branch_idx)\n\n        layer_name = f'layer{last_stage_idx + 1}'\n\n        self.__setattr__(layer_name, res_layer)\n        self.res_layers.pop(last_stage_idx)\n        self.res_layers.insert(last_stage_idx, layer_name)\n\n        self._freeze_stages()\n"
  },
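  {
    "path": "examples/hypothetical_trident_resnet_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not a file from the upstream repository.\n# It shows one way the TridentResNet backbone above might be configured and\n# built from the registry; the three-stage / three-branch values follow the\n# class docstring, while the exact kwargs here are illustrative assumptions.\nimport torch\n\nfrom mmdet.registry import MODELS\n\ntrident_cfg = dict(\n    type='TridentResNet',\n    depth=50,\n    num_branch=3,\n    test_branch_idx=1,\n    trident_dilations=(1, 2, 3),\n    num_stages=3,\n    strides=(1, 2, 2),\n    dilations=(1, 1, 1),\n    out_indices=(2, ))\nbackbone = MODELS.build(trident_cfg)\nbackbone.train()\n\nfeats = backbone(torch.randn(1, 3, 224, 224))\n# In training mode the three dilated branches of the last stage are\n# concatenated along the batch dimension, e.g. (3, 1024, 14, 14) here.\nprint(feats[0].shape)\n"
  },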
  {
    "path": "mmdet/models/data_preprocessors/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .data_preprocessor import (BatchFixedSizePad, BatchResize,\n                                BatchSyncRandomResize, BoxInstDataPreprocessor,\n                                DetDataPreprocessor,\n                                MultiBranchDataPreprocessor)\n\n__all__ = [\n    'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',\n    'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor'\n]\n"
  },
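  {
    "path": "examples/hypothetical_det_data_preprocessor_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not a file from the upstream repository.\n# It shows how the exported DetDataPreprocessor (defined in\n# data_preprocessor.py below) normalizes, channel-flips and pads a small\n# batch; the ImageNet mean/std values are illustrative assumptions.\nimport torch\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\n\npreprocessor = MODELS.build(\n    dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32))\n\ndata = dict(\n    inputs=[\n        torch.randint(0, 256, (3, 300, 400), dtype=torch.uint8),\n        torch.randint(0, 256, (3, 320, 352), dtype=torch.uint8),\n    ],\n    data_samples=[DetDataSample(), DetDataSample()])\n\nout = preprocessor(data, training=False)\n# Images are padded to the batch maximum rounded up to a multiple of 32 and\n# stacked into (2, 3, 320, 416); every data sample now carries\n# batch_input_shape and pad_shape metainfo.\nprint(out['inputs'].shape)\n"
  },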
  {
    "path": "mmdet/models/data_preprocessors/data_preprocessor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport random\nfrom numbers import Number\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.dist import barrier, broadcast, get_dist_info\nfrom mmengine.logging import MessageHub\nfrom mmengine.model import BaseDataPreprocessor, ImgDataPreprocessor\nfrom mmengine.structures import PixelData\nfrom mmengine.utils import is_seq_of\nfrom torch import Tensor\n\nfrom mmdet.models.utils import unfold_wo_center\nfrom mmdet.models.utils.misc import samplelist_boxtype2tensor\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.mask import BitmapMasks\nfrom mmdet.utils import ConfigType\n\ntry:\n    import skimage\nexcept ImportError:\n    skimage = None\n\n\n@MODELS.register_module()\nclass DetDataPreprocessor(ImgDataPreprocessor):\n    \"\"\"Image pre-processor for detection tasks.\n\n    Comparing with the :class:`mmengine.ImgDataPreprocessor`,\n\n    1. It supports batch augmentations.\n    2. It will additionally append batch_input_shape and pad_shape\n    to data_samples considering the object detection task.\n\n    It provides the data pre-processing as follows\n\n    - Collate and move data to the target device.\n    - Pad inputs to the maximum size of current batch with defined\n      ``pad_value``. The padding size can be divisible by a defined\n      ``pad_size_divisor``\n    - Stack inputs to batch_inputs.\n    - Convert inputs from bgr to rgb if the shape of input is (3, H, W).\n    - Normalize image with defined std and mean.\n    - Do batch augmentations during training.\n\n    Args:\n        mean (Sequence[Number], optional): The pixel mean of R, G, B channels.\n            Defaults to None.\n        std (Sequence[Number], optional): The pixel standard deviation of\n            R, G, B channels. Defaults to None.\n        pad_size_divisor (int): The size of padded image should be\n            divisible by ``pad_size_divisor``. Defaults to 1.\n        pad_value (Number): The padded pixel value. Defaults to 0.\n        pad_mask (bool): Whether to pad instance masks. Defaults to False.\n        mask_pad_value (int): The padded pixel value for instance masks.\n            Defaults to 0.\n        pad_seg (bool): Whether to pad semantic segmentation maps.\n            Defaults to False.\n        seg_pad_value (int): The padded pixel value for semantic\n            segmentation maps. Defaults to 255.\n        bgr_to_rgb (bool): whether to convert image from BGR to RGB.\n            Defaults to False.\n        rgb_to_bgr (bool): whether to convert image from RGB to RGB.\n            Defaults to False.\n        boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of\n            bboxes data or not. Defaults to True.\n        non_blocking (bool): Whether block current process\n            when transferring data to device. 
Defaults to False.\n        batch_augments (list[dict], optional): Batch-level augmentations\n    \"\"\"\n\n    def __init__(self,\n                 mean: Sequence[Number] = None,\n                 std: Sequence[Number] = None,\n                 pad_size_divisor: int = 1,\n                 pad_value: Union[float, int] = 0,\n                 pad_mask: bool = False,\n                 mask_pad_value: int = 0,\n                 pad_seg: bool = False,\n                 seg_pad_value: int = 255,\n                 bgr_to_rgb: bool = False,\n                 rgb_to_bgr: bool = False,\n                 boxtype2tensor: bool = True,\n                 non_blocking: Optional[bool] = False,\n                 batch_augments: Optional[List[dict]] = None):\n        super().__init__(\n            mean=mean,\n            std=std,\n            pad_size_divisor=pad_size_divisor,\n            pad_value=pad_value,\n            bgr_to_rgb=bgr_to_rgb,\n            rgb_to_bgr=rgb_to_bgr,\n            non_blocking=non_blocking)\n        if batch_augments is not None:\n            self.batch_augments = nn.ModuleList(\n                [MODELS.build(aug) for aug in batch_augments])\n        else:\n            self.batch_augments = None\n        self.pad_mask = pad_mask\n        self.mask_pad_value = mask_pad_value\n        self.pad_seg = pad_seg\n        self.seg_pad_value = seg_pad_value\n        self.boxtype2tensor = boxtype2tensor\n\n    def forward(self, data: dict, training: bool = False) -> dict:\n        \"\"\"Perform normalization、padding and bgr2rgb conversion based on\n        ``BaseDataPreprocessor``.\n\n        Args:\n            data (dict): Data sampled from dataloader.\n            training (bool): Whether to enable training time augmentation.\n\n        Returns:\n            dict: Data in the same format as the model input.\n        \"\"\"\n        batch_pad_shape = self._get_pad_shape(data)\n        data = super().forward(data=data, training=training)\n        inputs, data_samples = data['inputs'], data['data_samples']\n\n        if data_samples is not None:\n            # NOTE the batched image size information may be useful, e.g.\n            # in DETR, this is needed for the construction of masks, which is\n            # then used for the transformer_head.\n            batch_input_shape = tuple(inputs[0].size()[-2:])\n            for data_sample, pad_shape in zip(data_samples, batch_pad_shape):\n                data_sample.set_metainfo({\n                    'batch_input_shape': batch_input_shape,\n                    'pad_shape': pad_shape\n                })\n\n            if self.boxtype2tensor:\n                samplelist_boxtype2tensor(data_samples)\n\n            if self.pad_mask and training:\n                self.pad_gt_masks(data_samples)\n\n            if self.pad_seg and training:\n                self.pad_gt_sem_seg(data_samples)\n\n        if training and self.batch_augments is not None:\n            for batch_aug in self.batch_augments:\n                inputs, data_samples = batch_aug(inputs, data_samples)\n\n        return {'inputs': inputs, 'data_samples': data_samples}\n\n    def _get_pad_shape(self, data: dict) -> List[tuple]:\n        \"\"\"Get the pad_shape of each image based on data and\n        pad_size_divisor.\"\"\"\n        _batch_inputs = data['inputs']\n        # Process data with `pseudo_collate`.\n        if is_seq_of(_batch_inputs, torch.Tensor):\n            batch_pad_shape = []\n            for ori_input in _batch_inputs:\n                pad_h = int(\n              
      np.ceil(ori_input.shape[1] /\n                            self.pad_size_divisor)) * self.pad_size_divisor\n                pad_w = int(\n                    np.ceil(ori_input.shape[2] /\n                            self.pad_size_divisor)) * self.pad_size_divisor\n                batch_pad_shape.append((pad_h, pad_w))\n        # Process data with `default_collate`.\n        elif isinstance(_batch_inputs, torch.Tensor):\n            assert _batch_inputs.dim() == 4, (\n                'The input of `ImgDataPreprocessor` should be a NCHW tensor '\n                'or a list of tensor, but got a tensor with shape: '\n                f'{_batch_inputs.shape}')\n            pad_h = int(\n                np.ceil(_batch_inputs.shape[1] /\n                        self.pad_size_divisor)) * self.pad_size_divisor\n            pad_w = int(\n                np.ceil(_batch_inputs.shape[2] /\n                        self.pad_size_divisor)) * self.pad_size_divisor\n            batch_pad_shape = [(pad_h, pad_w)] * _batch_inputs.shape[0]\n        else:\n            raise TypeError('Output of `cast_data` should be a dict '\n                            'or a tuple with inputs and data_samples, but got'\n                            f'{type(data)}： {data}')\n        return batch_pad_shape\n\n    def pad_gt_masks(self,\n                     batch_data_samples: Sequence[DetDataSample]) -> None:\n        \"\"\"Pad gt_masks to shape of batch_input_shape.\"\"\"\n        if 'masks' in batch_data_samples[0].gt_instances:\n            for data_samples in batch_data_samples:\n                masks = data_samples.gt_instances.masks\n                data_samples.gt_instances.masks = masks.pad(\n                    data_samples.batch_input_shape,\n                    pad_val=self.mask_pad_value)\n\n    def pad_gt_sem_seg(self,\n                       batch_data_samples: Sequence[DetDataSample]) -> None:\n        \"\"\"Pad gt_sem_seg to shape of batch_input_shape.\"\"\"\n        if 'gt_sem_seg' in batch_data_samples[0]:\n            for data_samples in batch_data_samples:\n                gt_sem_seg = data_samples.gt_sem_seg.sem_seg\n                h, w = gt_sem_seg.shape[-2:]\n                pad_h, pad_w = data_samples.batch_input_shape\n                gt_sem_seg = F.pad(\n                    gt_sem_seg,\n                    pad=(0, max(pad_w - w, 0), 0, max(pad_h - h, 0)),\n                    mode='constant',\n                    value=self.seg_pad_value)\n                data_samples.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)\n\n\n@MODELS.register_module()\nclass BatchSyncRandomResize(nn.Module):\n    \"\"\"Batch random resize which synchronizes the random size across ranks.\n\n    Args:\n        random_size_range (tuple): The multi-scale random range during\n            multi-scale training.\n        interval (int): The iter interval of change\n            image size. 
Defaults to 10.\n        size_divisor (int): Image size divisible factor.\n            Defaults to 32.\n    \"\"\"\n\n    def __init__(self,\n                 random_size_range: Tuple[int, int],\n                 interval: int = 10,\n                 size_divisor: int = 32) -> None:\n        super().__init__()\n        self.rank, self.world_size = get_dist_info()\n        self._input_size = None\n        self._random_size_range = (round(random_size_range[0] / size_divisor),\n                                   round(random_size_range[1] / size_divisor))\n        self._interval = interval\n        self._size_divisor = size_divisor\n\n    def forward(\n        self, inputs: Tensor, data_samples: List[DetDataSample]\n    ) -> Tuple[Tensor, List[DetDataSample]]:\n        \"\"\"resize a batch of images and bboxes to shape ``self._input_size``\"\"\"\n        h, w = inputs.shape[-2:]\n        if self._input_size is None:\n            self._input_size = (h, w)\n        scale_y = self._input_size[0] / h\n        scale_x = self._input_size[1] / w\n        if scale_x != 1 or scale_y != 1:\n            inputs = F.interpolate(\n                inputs,\n                size=self._input_size,\n                mode='bilinear',\n                align_corners=False)\n            for data_sample in data_samples:\n                img_shape = (int(data_sample.img_shape[0] * scale_y),\n                             int(data_sample.img_shape[1] * scale_x))\n                pad_shape = (int(data_sample.pad_shape[0] * scale_y),\n                             int(data_sample.pad_shape[1] * scale_x))\n                data_sample.set_metainfo({\n                    'img_shape': img_shape,\n                    'pad_shape': pad_shape,\n                    'batch_input_shape': self._input_size\n                })\n                data_sample.gt_instances.bboxes[\n                    ...,\n                    0::2] = data_sample.gt_instances.bboxes[...,\n                                                            0::2] * scale_x\n                data_sample.gt_instances.bboxes[\n                    ...,\n                    1::2] = data_sample.gt_instances.bboxes[...,\n                                                            1::2] * scale_y\n                if 'ignored_instances' in data_sample:\n                    data_sample.ignored_instances.bboxes[\n                        ..., 0::2] = data_sample.ignored_instances.bboxes[\n                            ..., 0::2] * scale_x\n                    data_sample.ignored_instances.bboxes[\n                        ..., 1::2] = data_sample.ignored_instances.bboxes[\n                            ..., 1::2] * scale_y\n        message_hub = MessageHub.get_current_instance()\n        if (message_hub.get_info('iter') + 1) % self._interval == 0:\n            self._input_size = self._get_random_size(\n                aspect_ratio=float(w / h), device=inputs.device)\n        return inputs, data_samples\n\n    def _get_random_size(self, aspect_ratio: float,\n                         device: torch.device) -> Tuple[int, int]:\n        \"\"\"Randomly generate a shape in ``_random_size_range`` and broadcast to\n        all ranks.\"\"\"\n        tensor = torch.LongTensor(2).to(device)\n        if self.rank == 0:\n            size = random.randint(*self._random_size_range)\n            size = (self._size_divisor * size,\n                    self._size_divisor * int(aspect_ratio * size))\n            tensor[0] = size[0]\n            tensor[1] = size[1]\n        barrier()\n        
broadcast(tensor, 0)\n        input_size = (tensor[0].item(), tensor[1].item())\n        return input_size\n\n\n@MODELS.register_module()\nclass BatchFixedSizePad(nn.Module):\n    \"\"\"Fixed size padding for batch images.\n\n    Args:\n        size (Tuple[int, int]): Fixed padding size. Expected padding\n            shape (h, w). Defaults to None.\n        img_pad_value (int): The padded pixel value for images.\n            Defaults to 0.\n        pad_mask (bool): Whether to pad instance masks. Defaults to False.\n        mask_pad_value (int): The padded pixel value for instance masks.\n            Defaults to 0.\n        pad_seg (bool): Whether to pad semantic segmentation maps.\n            Defaults to False.\n        seg_pad_value (int): The padded pixel value for semantic\n            segmentation maps. Defaults to 255.\n    \"\"\"\n\n    def __init__(self,\n                 size: Tuple[int, int],\n                 img_pad_value: int = 0,\n                 pad_mask: bool = False,\n                 mask_pad_value: int = 0,\n                 pad_seg: bool = False,\n                 seg_pad_value: int = 255) -> None:\n        super().__init__()\n        self.size = size\n        self.pad_mask = pad_mask\n        self.pad_seg = pad_seg\n        self.img_pad_value = img_pad_value\n        self.mask_pad_value = mask_pad_value\n        self.seg_pad_value = seg_pad_value\n\n    def forward(\n        self,\n        inputs: Tensor,\n        data_samples: Optional[List[dict]] = None\n    ) -> Tuple[Tensor, Optional[List[dict]]]:\n        \"\"\"Pad image, instance masks, segmantic segmentation maps.\"\"\"\n        src_h, src_w = inputs.shape[-2:]\n        dst_h, dst_w = self.size\n\n        if src_h >= dst_h and src_w >= dst_w:\n            return inputs, data_samples\n\n        inputs = F.pad(\n            inputs,\n            pad=(0, max(0, dst_w - src_w), 0, max(0, dst_h - src_h)),\n            mode='constant',\n            value=self.img_pad_value)\n\n        if data_samples is not None:\n            # update batch_input_shape\n            for data_sample in data_samples:\n                data_sample.set_metainfo({\n                    'batch_input_shape': (dst_h, dst_w),\n                    'pad_shape': (dst_h, dst_w)\n                })\n\n            if self.pad_mask:\n                for data_sample in data_samples:\n                    masks = data_sample.gt_instances.masks\n                    data_sample.gt_instances.masks = masks.pad(\n                        (dst_h, dst_w), pad_val=self.mask_pad_value)\n\n            if self.pad_seg:\n                for data_sample in data_samples:\n                    gt_sem_seg = data_sample.gt_sem_seg.sem_seg\n                    h, w = gt_sem_seg.shape[-2:]\n                    gt_sem_seg = F.pad(\n                        gt_sem_seg,\n                        pad=(0, max(0, dst_w - w), 0, max(0, dst_h - h)),\n                        mode='constant',\n                        value=self.seg_pad_value)\n                    data_sample.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)\n\n        return inputs, data_samples\n\n\n@MODELS.register_module()\nclass MultiBranchDataPreprocessor(BaseDataPreprocessor):\n    \"\"\"DataPreprocessor wrapper for multi-branch data.\n\n    Take semi-supervised object detection as an example, assume that\n    the ratio of labeled data and unlabeled data in a batch is 1:2,\n    `sup` indicates the branch where the labeled data is augmented,\n    `unsup_teacher` and `unsup_student` indicate the branches where\n    the 
unlabeled data is augmented by different pipelines.\n\n    The input format of multi-branch data is shown as below:\n\n    .. code-block:: none\n        {\n            'inputs':\n                {\n                    'sup': [Tensor, None, None],\n                    'unsup_teacher': [None, Tensor, Tensor],\n                    'unsup_student': [None, Tensor, Tensor],\n                },\n            'data_sample':\n                {\n                    'sup': [DetDataSample, None, None],\n                    'unsup_teacher': [None, DetDataSample, DetDataSample],\n                    'unsup_student': [None, DetDataSample, DetDataSample],\n                }\n        }\n\n    The format of multi-branch data\n    after filtering None is shown as below:\n\n    .. code-block:: none\n        {\n            'inputs':\n                {\n                    'sup': [Tensor],\n                    'unsup_teacher': [Tensor, Tensor],\n                    'unsup_student': [Tensor, Tensor],\n                },\n            'data_sample':\n                {\n                    'sup': [DetDataSample],\n                    'unsup_teacher': [DetDataSample, DetDataSample],\n                    'unsup_student': [DetDataSample, DetDataSample],\n                }\n        }\n\n    In order to reuse `DetDataPreprocessor` for the data\n    from different branches, the format of multi-branch data\n    grouped by branch is as below:\n\n    .. code-block:: none\n        {\n            'sup':\n                {\n                    'inputs': [Tensor]\n                    'data_sample': [DetDataSample]\n                },\n            'unsup_teacher':\n                {\n                    'inputs': [Tensor, Tensor]\n                    'data_sample': [DetDataSample, DetDataSample]\n                },\n            'unsup_student':\n                {\n                    'inputs': [Tensor, Tensor]\n                    'data_sample': [DetDataSample, DetDataSample]\n                },\n        }\n\n    After preprocessing data from different branches,\n    the multi-branch data needs to be reformatted as:\n\n    .. 
code-block:: none\n        {\n            'inputs':\n                {\n                    'sup': [Tensor],\n                    'unsup_teacher': [Tensor, Tensor],\n                    'unsup_student': [Tensor, Tensor],\n                },\n            'data_sample':\n                {\n                    'sup': [DetDataSample],\n                    'unsup_teacher': [DetDataSample, DetDataSample],\n                    'unsup_student': [DetDataSample, DetDataSample],\n                }\n        }\n\n    Args:\n        data_preprocessor (:obj:`ConfigDict` or dict): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n    \"\"\"\n\n    def __init__(self, data_preprocessor: ConfigType) -> None:\n        super().__init__()\n        self.data_preprocessor = MODELS.build(data_preprocessor)\n\n    def forward(self, data: dict, training: bool = False) -> dict:\n        \"\"\"Perform normalization, padding and bgr2rgb conversion based on\n        ``BaseDataPreprocessor`` for multi-branch data.\n\n        Args:\n            data (dict): Data sampled from dataloader.\n            training (bool): Whether to enable training time augmentation.\n\n        Returns:\n            dict:\n\n            - 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of\n                models from different branches.\n            - 'data_sample' (Dict[str, obj:`DetDataSample`]): The annotation\n                info of the sample from different branches.\n        \"\"\"\n\n        if training is False:\n            return self.data_preprocessor(data, training)\n\n        # Filter out branches with a value of None\n        for key in data.keys():\n            for branch in data[key].keys():\n                data[key][branch] = list(\n                    filter(lambda x: x is not None, data[key][branch]))\n\n        # Group data by branch\n        multi_branch_data = {}\n        for key in data.keys():\n            for branch in data[key].keys():\n                if multi_branch_data.get(branch, None) is None:\n                    multi_branch_data[branch] = {key: data[key][branch]}\n                elif multi_branch_data[branch].get(key, None) is None:\n                    multi_branch_data[branch][key] = data[key][branch]\n                else:\n                    multi_branch_data[branch][key].append(data[key][branch])\n\n        # Preprocess data from different branches\n        for branch, _data in multi_branch_data.items():\n            multi_branch_data[branch] = self.data_preprocessor(_data, training)\n\n        # Format data by inputs and data_samples\n        format_data = {}\n        for branch in multi_branch_data.keys():\n            for key in multi_branch_data[branch].keys():\n                if format_data.get(key, None) is None:\n                    format_data[key] = {branch: multi_branch_data[branch][key]}\n                elif format_data[key].get(branch, None) is None:\n                    format_data[key][branch] = multi_branch_data[branch][key]\n                else:\n                    format_data[key][branch].append(\n                        multi_branch_data[branch][key])\n\n        return format_data\n\n    @property\n    def device(self):\n        return self.data_preprocessor.device\n\n    def to(self, device: Optional[Union[int, torch.device]], *args,\n           **kwargs) -> nn.Module:\n        \"\"\"Overrides this method to set the :attr:`device`\n\n        Args:\n            device (int or torch.device, optional): The desired device of the\n              
  parameters and buffers in this module.\n\n        Returns:\n            nn.Module: The model itself.\n        \"\"\"\n\n        return self.data_preprocessor.to(device, *args, **kwargs)\n\n    def cuda(self, *args, **kwargs) -> nn.Module:\n        \"\"\"Overrides this method to set the :attr:`device`\n\n        Returns:\n            nn.Module: The model itself.\n        \"\"\"\n\n        return self.data_preprocessor.cuda(*args, **kwargs)\n\n    def cpu(self, *args, **kwargs) -> nn.Module:\n        \"\"\"Overrides this method to set the :attr:`device`\n\n        Returns:\n            nn.Module: The model itself.\n        \"\"\"\n\n        return self.data_preprocessor.cpu(*args, **kwargs)\n\n\n@MODELS.register_module()\nclass BatchResize(nn.Module):\n    \"\"\"Batch resize during training. This implementation is modified from\n    https://github.com/Purkialo/CrowdDet/blob/master/lib/data/CrowdHuman.py.\n\n    It provides the data pre-processing as follows:\n\n    - All images in a batch are padded to a uniform size and stacked into\n      a torch.Tensor by `DetDataPreprocessor`.\n    - `BatchResize` resizes all images to the target size.\n    - Images are padded so that their size is divisible by\n      ``pad_size_divisor``.\n\n    Args:\n        scale (tuple): Image scales for resizing.\n        pad_size_divisor (int): Image size divisible factor.\n            Defaults to 1.\n        pad_value (Number): The padded pixel value. Defaults to 0.\n    \"\"\"\n\n    def __init__(\n        self,\n        scale: tuple,\n        pad_size_divisor: int = 1,\n        pad_value: Union[float, int] = 0,\n    ) -> None:\n        super().__init__()\n        self.min_size = min(scale)\n        self.max_size = max(scale)\n        self.pad_size_divisor = pad_size_divisor\n        self.pad_value = pad_value\n\n    def forward(\n        self, inputs: Tensor, data_samples: List[DetDataSample]\n    ) -> Tuple[Tensor, List[DetDataSample]]:\n        \"\"\"Resize a batch of images and bboxes.\"\"\"\n\n        batch_height, batch_width = inputs.shape[-2:]\n        target_height, target_width, scale = self.get_target_size(\n            batch_height, batch_width)\n\n        inputs = F.interpolate(\n            inputs,\n            size=(target_height, target_width),\n            mode='bilinear',\n            align_corners=False)\n\n        inputs = self.get_padded_tensor(inputs, self.pad_value)\n\n        if data_samples is not None:\n            batch_input_shape = tuple(inputs.size()[-2:])\n            for data_sample in data_samples:\n                img_shape = [\n                    int(scale * _) for _ in list(data_sample.img_shape)\n                ]\n                data_sample.set_metainfo({\n                    'img_shape': tuple(img_shape),\n                    'batch_input_shape': batch_input_shape,\n                    'pad_shape': batch_input_shape,\n                    'scale_factor': (scale, scale)\n                })\n\n                data_sample.gt_instances.bboxes *= scale\n                data_sample.ignored_instances.bboxes *= scale\n\n        return inputs, data_samples\n\n    def get_target_size(self, height: int,\n                        width: int) -> Tuple[int, int, float]:\n        \"\"\"Get the target size of a batch of images based on data and scale.\"\"\"\n        im_size_min = np.min([height, width])\n        im_size_max = np.max([height, width])\n        scale = self.min_size / im_size_min\n        if scale * im_size_max > self.max_size:\n            scale = 
self.max_size / im_size_max\n        target_height, target_width = int(round(height * scale)), int(\n            round(width * scale))\n        return target_height, target_width, scale\n\n    def get_padded_tensor(self, tensor: Tensor, pad_value: int) -> Tensor:\n        \"\"\"Pad images according to pad_size_divisor.\"\"\"\n        assert tensor.ndim == 4\n        target_height, target_width = tensor.shape[-2], tensor.shape[-1]\n        divisor = self.pad_size_divisor\n        padded_height = (target_height + divisor - 1) // divisor * divisor\n        padded_width = (target_width + divisor - 1) // divisor * divisor\n        padded_tensor = torch.ones([\n            tensor.shape[0], tensor.shape[1], padded_height, padded_width\n        ]) * pad_value\n        padded_tensor = padded_tensor.type_as(tensor)\n        padded_tensor[:, :, :target_height, :target_width] = tensor\n        return padded_tensor\n\n\n@MODELS.register_module()\nclass BoxInstDataPreprocessor(DetDataPreprocessor):\n    \"\"\"Pseudo mask pre-processor for BoxInst.\n\n    Comparing with the :class:`mmdet.DetDataPreprocessor`,\n\n    1. It generates masks using box annotations.\n    2. It computes the images color similarity in LAB color space.\n\n    Args:\n        mask_stride (int): The mask output stride in boxinst. Defaults to 4.\n        pairwise_size (int): The size of neighborhood for each pixel.\n            Defaults to 3.\n        pairwise_dilation (int): The dilation of neighborhood for each pixel.\n            Defaults to 2.\n        pairwise_color_thresh (float): The thresh of image color similarity.\n            Defaults to 0.3.\n        bottom_pixels_removed (int): The length of removed pixels in bottom.\n            It is caused by the annotation error in coco dataset.\n            Defaults to 10.\n    \"\"\"\n\n    def __init__(self,\n                 *arg,\n                 mask_stride: int = 4,\n                 pairwise_size: int = 3,\n                 pairwise_dilation: int = 2,\n                 pairwise_color_thresh: float = 0.3,\n                 bottom_pixels_removed: int = 10,\n                 **kwargs) -> None:\n        super().__init__(*arg, **kwargs)\n        self.mask_stride = mask_stride\n        self.pairwise_size = pairwise_size\n        self.pairwise_dilation = pairwise_dilation\n        self.pairwise_color_thresh = pairwise_color_thresh\n        self.bottom_pixels_removed = bottom_pixels_removed\n\n        if skimage is None:\n            raise RuntimeError('skimage is not installed,\\\n                 please install it by: pip install scikit-image')\n\n    def get_images_color_similarity(self, inputs: Tensor,\n                                    image_masks: Tensor) -> Tensor:\n        \"\"\"Compute the image color similarity in LAB color space.\"\"\"\n        assert inputs.dim() == 4\n        assert inputs.size(0) == 1\n\n        unfolded_images = unfold_wo_center(\n            inputs,\n            kernel_size=self.pairwise_size,\n            dilation=self.pairwise_dilation)\n        diff = inputs[:, :, None] - unfolded_images\n        similarity = torch.exp(-torch.norm(diff, dim=1) * 0.5)\n\n        unfolded_weights = unfold_wo_center(\n            image_masks[None, None],\n            kernel_size=self.pairwise_size,\n            dilation=self.pairwise_dilation)\n        unfolded_weights = torch.max(unfolded_weights, dim=1)[0]\n\n        return similarity * unfolded_weights\n\n    def forward(self, data: dict, training: bool = False) -> dict:\n        \"\"\"Get pseudo mask labels 
using color similarity.\"\"\"\n        det_data = super().forward(data, training)\n        inputs, data_samples = det_data['inputs'], det_data['data_samples']\n\n        if training:\n            # get image masks and remove bottom pixels\n            b_img_h, b_img_w = data_samples[0].batch_input_shape\n            img_masks = []\n            for i in range(inputs.shape[0]):\n                img_h, img_w = data_samples[i].img_shape\n                img_mask = inputs.new_ones((img_h, img_w))\n                pixels_removed = int(self.bottom_pixels_removed *\n                                     float(img_h) / float(b_img_h))\n                if pixels_removed > 0:\n                    img_mask[-pixels_removed:, :] = 0\n                pad_w = b_img_w - img_w\n                pad_h = b_img_h - img_h\n                img_mask = F.pad(img_mask, (0, pad_w, 0, pad_h), 'constant',\n                                 0.)\n                img_masks.append(img_mask)\n            img_masks = torch.stack(img_masks, dim=0)\n            start = int(self.mask_stride // 2)\n            img_masks = img_masks[:, start::self.mask_stride,\n                                  start::self.mask_stride]\n\n            # Get origin rgb image for color similarity\n            ori_imgs = inputs * self.std + self.mean\n            downsampled_imgs = F.avg_pool2d(\n                ori_imgs.float(),\n                kernel_size=self.mask_stride,\n                stride=self.mask_stride,\n                padding=0)\n\n            # Compute color similarity for pseudo mask generation\n            for im_i, data_sample in enumerate(data_samples):\n                # TODO: Support rgb2lab in mmengine?\n                images_lab = skimage.color.rgb2lab(\n                    downsampled_imgs[im_i].byte().permute(1, 2,\n                                                          0).cpu().numpy())\n                images_lab = torch.as_tensor(\n                    images_lab, device=ori_imgs.device, dtype=torch.float32)\n                images_lab = images_lab.permute(2, 0, 1)[None]\n                images_color_similarity = self.get_images_color_similarity(\n                    images_lab, img_masks[im_i])\n                pairwise_mask = (images_color_similarity >=\n                                 self.pairwise_color_thresh).float()\n\n                per_im_bboxes = data_sample.gt_instances.bboxes\n                if per_im_bboxes.shape[0] > 0:\n                    per_im_masks = []\n                    for per_box in per_im_bboxes:\n                        mask_full = torch.zeros((b_img_h, b_img_w),\n                                                device=self.device).float()\n                        mask_full[int(per_box[1]):int(per_box[3] + 1),\n                                  int(per_box[0]):int(per_box[2] + 1)] = 1.0\n                        per_im_masks.append(mask_full)\n                    per_im_masks = torch.stack(per_im_masks, dim=0)\n                    pairwise_masks = torch.cat(\n                        [pairwise_mask for _ in range(per_im_bboxes.shape[0])],\n                        dim=0)\n                else:\n                    per_im_masks = torch.zeros((0, b_img_h, b_img_w))\n                    pairwise_masks = torch.zeros(\n                        (0, self.pairwise_size**2 - 1, b_img_h, b_img_w))\n\n                # TODO: Support BitmapMasks with tensor?\n                data_sample.gt_instances.masks = BitmapMasks(\n                    per_im_masks.cpu().numpy(), b_img_h, b_img_w)\n                
data_sample.gt_instances.pairwise_masks = pairwise_masks\n        return {'inputs': inputs, 'data_samples': data_samples}\n"
  },
  {
    "path": "mmdet/models/dense_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_free_head import AnchorFreeHead\nfrom .anchor_head import AnchorHead\nfrom .atss_head import ATSSHead\nfrom .autoassign_head import AutoAssignHead\nfrom .boxinst_head import BoxInstBboxHead, BoxInstMaskHead\nfrom .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead\nfrom .centernet_head import CenterNetHead\nfrom .centernet_update_head import CenterNetUpdateHead\nfrom .centripetal_head import CentripetalHead\nfrom .condinst_head import CondInstBboxHead, CondInstMaskHead\nfrom .conditional_detr_head import ConditionalDETRHead\nfrom .corner_head import CornerHead\nfrom .dab_detr_head import DABDETRHead\nfrom .ddod_head import DDODHead\nfrom .deformable_detr_head import DeformableDETRHead\nfrom .detr_head import DETRHead\nfrom .dino_head import DINOHead\nfrom .embedding_rpn_head import EmbeddingRPNHead\nfrom .fcos_head import FCOSHead\nfrom .fovea_head import FoveaHead\nfrom .free_anchor_retina_head import FreeAnchorRetinaHead\nfrom .fsaf_head import FSAFHead\nfrom .ga_retina_head import GARetinaHead\nfrom .ga_rpn_head import GARPNHead\nfrom .gfl_head import GFLHead\nfrom .guided_anchor_head import FeatureAdaption, GuidedAnchorHead\nfrom .lad_head import LADHead\nfrom .ld_head import LDHead\nfrom .mask2former_head import Mask2FormerHead\nfrom .maskformer_head import MaskFormerHead\nfrom .nasfcos_head import NASFCOSHead\nfrom .paa_head import PAAHead\nfrom .pisa_retinanet_head import PISARetinaHead\nfrom .pisa_ssd_head import PISASSDHead\nfrom .reppoints_head import RepPointsHead\nfrom .retina_head import RetinaHead\nfrom .retina_sepbn_head import RetinaSepBNHead\nfrom .rpn_head import RPNHead\nfrom .rtmdet_head import RTMDetHead, RTMDetSepBNHead\nfrom .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead\nfrom .sabl_retina_head import SABLRetinaHead\nfrom .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead\nfrom .solov2_head import SOLOV2Head\nfrom .ssd_head import SSDHead\nfrom .tood_head import TOODHead\nfrom .vfnet_head import VFNetHead\nfrom .yolact_head import YOLACTHead, YOLACTProtonet\nfrom .yolo_head import YOLOV3Head\nfrom .yolof_head import YOLOFHead\nfrom .yolox_head import YOLOXHead\n\n__all__ = [\n    'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',\n    'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',\n    'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',\n    'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',\n    'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',\n    'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',\n    'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',\n    'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',\n    'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',\n    'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',\n    'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',\n    'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',\n    'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',\n    'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',\n    'DABDETRHead'\n]\n"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_free_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\nfrom typing import Any, List, Sequence, Tuple, Union\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom numpy import ndarray\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList)\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..utils import multi_apply\nfrom .base_dense_head import BaseDenseHead\n\nStrideType = Union[Sequence[int], Sequence[Tuple[int, int]]]\n\n\n@MODELS.register_module()\nclass AnchorFreeHead(BaseDenseHead):\n    \"\"\"Anchor-free head (FCOS, Fovea, RepPoints, etc.).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n        stacked_convs (int): Number of stacking convs of the head.\n        strides (Sequence[int] or Sequence[Tuple[int, int]]): Downsample\n            factor of each feature map.\n        dcn_on_last_conv (bool): If true, use dcn in the last layer of\n            towers. Defaults to False.\n        conv_bias (bool or str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Default: \"auto\".\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.\n        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults\n            'DistancePointBBoxCoder'.\n        conv_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for\n            normalization layer. 
Defaults to None.\n        train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of\n            anchor-free head.\n        test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of\n            anchor-free head.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    _version = 1\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        feat_channels: int = 256,\n        stacked_convs: int = 4,\n        strides: StrideType = (4, 8, 16, 32, 64),\n        dcn_on_last_conv: bool = False,\n        conv_bias: Union[bool, str] = 'auto',\n        loss_cls: ConfigType = dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0),\n        bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        init_cfg: MultiConfig = dict(\n            type='Normal',\n            layer='Conv2d',\n            std=0.01,\n            override=dict(\n                type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.dcn_on_last_conv = dcn_on_last_conv\n        assert conv_bias == 'auto' or isinstance(conv_bias, bool)\n        self.conv_bias = conv_bias\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n\n        self.prior_generator = MlvlPointGenerator(strides)\n\n        # In order to keep a more general interface and be consistent with\n        # anchor_head. 
We can think of point like one anchor\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.fp16_enabled = False\n\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self._init_cls_convs()\n        self._init_reg_convs()\n        self._init_predictor()\n\n    def _init_cls_convs(self) -> None:\n        \"\"\"Initialize classification conv layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.conv_bias))\n\n    def _init_reg_convs(self) -> None:\n        \"\"\"Initialize bbox regression conv layers of the head.\"\"\"\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.conv_bias))\n\n    def _init_predictor(self) -> None:\n        \"\"\"Initialize predictor layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n    def _load_from_state_dict(self, state_dict: dict, prefix: str,\n                              local_metadata: dict, strict: bool,\n                              missing_keys: Union[List[str], str],\n                              unexpected_keys: Union[List[str], str],\n                              error_msgs: Union[List[str], str]) -> None:\n        \"\"\"Hack some keys of the model state dict so that can load checkpoints\n        of previous version.\"\"\"\n        version = local_metadata.get('version', None)\n        if version is None:\n            # the key is different in early versions\n            # for example, 'fcos_cls' become 'conv_cls' now\n            bbox_head_keys = [\n                k for k in state_dict.keys() if k.startswith(prefix)\n            ]\n            ori_predictor_keys = []\n            new_predictor_keys = []\n            # e.g. 
'fcos_cls' or 'fcos_reg'\n            for key in bbox_head_keys:\n                ori_predictor_keys.append(key)\n                key = key.split('.')\n                if len(key) < 2:\n                    conv_name = None\n                elif key[1].endswith('cls'):\n                    conv_name = 'conv_cls'\n                elif key[1].endswith('reg'):\n                    conv_name = 'conv_reg'\n                elif key[1].endswith('centerness'):\n                    conv_name = 'conv_centerness'\n                else:\n                    conv_name = None\n                if conv_name is not None:\n                    key[1] = conv_name\n                    new_predictor_keys.append('.'.join(key))\n                else:\n                    ori_predictor_keys.pop(-1)\n            for i in range(len(new_predictor_keys)):\n                state_dict[new_predictor_keys[i]] = state_dict.pop(\n                    ori_predictor_keys[i])\n        super()._load_from_state_dict(state_dict, prefix, local_metadata,\n                                      strict, missing_keys, unexpected_keys,\n                                      error_msgs)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually contain classification scores and bbox predictions.\n\n            - cls_scores (list[Tensor]): Box scores for each scale level, \\\n            each is a 4D-tensor, the channel number is \\\n            num_points * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for each scale \\\n            level, each is a 4D-tensor, the channel number is num_points * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, x)[:2]\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n\n        Returns:\n            tuple: Scores for each class, bbox predictions, features\n            after classification and regression conv layers, some\n            models needs these features like FCOS.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n        cls_score = self.conv_cls(cls_feat)\n\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n        bbox_pred = self.conv_reg(reg_feat)\n        return cls_score, bbox_pred, cls_feat, reg_feat\n\n    @abstractmethod\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            batch_gt_instances 
(list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n        \"\"\"\n\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_targets(self, points: List[Tensor],\n                    batch_gt_instances: InstanceList) -> Any:\n        \"\"\"Compute regression, classification and centerness targets for points\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n        \"\"\"\n        raise NotImplementedError\n\n    # TODO refactor aug_test\n    def aug_test(self,\n                 aug_batch_feats: List[Tensor],\n                 aug_batch_img_metas: List[List[Tensor]],\n                 rescale: bool = False) -> List[ndarray]:\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            aug_batch_feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            aug_batch_img_metas (list[list[dict]]): the outer list indicates\n                test-time augs (multiscale, flip, etc.) and the inner list\n                indicates images in a batch. each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[ndarray]: bbox results of each class\n        \"\"\"\n        return self.aug_test_bboxes(\n            aug_batch_feats, aug_batch_img_metas, rescale=rescale)\n"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import BaseBoxes, cat_boxes, get_box_tensor\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, OptMultiConfig)\nfrom ..task_modules.prior_generators import (AnchorGenerator,\n                                             anchor_inside_flags)\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import images_to_levels, multi_apply, unmap\nfrom .base_dense_head import BaseDenseHead\n\n\n@MODELS.register_module()\nclass AnchorHead(BaseDenseHead):\n    \"\"\"Anchor-based head (RPN, RetinaNet, SSD, etc.).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        feat_channels: int = 256,\n        anchor_generator: ConfigType = dict(\n            type='AnchorGenerator',\n            scales=[8, 16, 32],\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        bbox_coder: ConfigType = dict(\n            type='DeltaXYWHBBoxCoder',\n            clip_border=True,\n            target_means=(.0, .0, .0, .0),\n            target_stds=(1.0, 1.0, 1.0, 1.0)),\n        reg_decoded_bbox: bool = False,\n        loss_cls: ConfigType = dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox: ConfigType = dict(\n            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        init_cfg: OptMultiConfig = dict(\n            type='Normal', layer='Conv2d', std=0.01)\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n\n        if self.cls_out_channels <= 0:\n            raise ValueError(f'num_classes={num_classes} is too small')\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        self.bbox_coder = 
TASK_UTILS.build(bbox_coder)\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            if train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n        self.fp16_enabled = False\n\n        self.prior_generator = TASK_UTILS.build(anchor_generator)\n\n        # Usually the numbers of anchors for each level are the same\n        # except SSD detectors. So it is an int in the most dense\n        # heads but a list of int in SSDHead\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n        self._init_layers()\n\n    @property\n    def num_anchors(self) -> int:\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'for consistency or also use '\n                      '`num_base_priors` instead')\n        return self.prior_generator.num_base_priors[0]\n\n    @property\n    def anchor_generator(self) -> AnchorGenerator:\n        warnings.warn('DeprecationWarning: anchor_generator is deprecated, '\n                      'please use \"prior_generator\" instead')\n        return self.prior_generator\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(self.in_channels,\n                                  self.num_base_priors * self.cls_out_channels,\n                                  1)\n        reg_dim = self.bbox_coder.encode_size\n        self.conv_reg = nn.Conv2d(self.in_channels,\n                                  self.num_base_priors * reg_dim, 1)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level \\\n                    the channels number is num_base_priors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale \\\n                    level, the channels number is num_base_priors * 4.\n        \"\"\"\n        cls_score = self.conv_cls(x)\n        bbox_pred = self.conv_reg(x)\n        return cls_score, bbox_pred\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_scores (list[Tensor]): Classification scores for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                    is num_base_priors * num_classes.\n                - bbox_preds (list[Tensor]): Box energies / deltas for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                    is num_base_priors * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, x)\n\n    def get_anchors(self,\n                    featmap_sizes: List[tuple],\n                    batch_img_metas: 
List[dict],\n                    device: Union[torch.device, str] = 'cuda') \\\n            -> Tuple[List[List[Tensor]], List[List[Tensor]]]:\n        \"\"\"Get anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            batch_img_metas (list[dict]): Image meta info.\n            device (torch.device | str): Device for returned tensors.\n                Defaults to cuda.\n\n        Returns:\n            tuple:\n\n                - anchor_list (list[list[Tensor]]): Anchors of each image.\n                - valid_flag_list (list[list[Tensor]]): Valid flags of each\n                  image.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # anchors for one time\n        multi_level_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level anchors\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_flags = self.prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'], device)\n            valid_flag_list.append(multi_level_flags)\n\n        return anchor_list, valid_flag_list\n\n    def _get_targets_single(self,\n                            flat_anchors: Union[Tensor, BaseBoxes],\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            flat_anchors (Tensor or :obj:`BaseBoxes`): Multi-level anchors\n                of the image, which are concatenated into a single tensor\n                or box type of shape (num_anchors, 4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors, ).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.  
Defaults to True.\n\n        Returns:\n            tuple:\n\n                - labels (Tensor): Labels of each level.\n                - label_weights (Tensor): Label weights of each level.\n                - bbox_targets (Tensor): BBox targets of each level.\n                - bbox_weights (Tensor): BBox weights of each level.\n                - pos_inds (Tensor): positive samples indexes.\n                - neg_inds (Tensor): negative samples indexes.\n                - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags]\n\n        pred_instances = InstanceData(priors=anchors)\n        assign_result = self.assigner.assign(pred_instances, gt_instances,\n                                             gt_instances_ignore)\n        # No sampling is required except for RPN and\n        # Guided Anchoring algorithms\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        target_dim = gt_instances.bboxes.size(-1) if self.reg_decoded_bbox \\\n            else self.bbox_coder.encode_size\n        bbox_targets = anchors.new_zeros(num_valid_anchors, target_dim)\n        bbox_weights = anchors.new_zeros(num_valid_anchors, target_dim)\n\n        # TODO: Considering saving memory, is it necessary to be long?\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        # `bbox_coder.encode` accepts tensor or box type inputs and generates\n        # tensor targets. 
If regressing decoded boxes, the code will convert\n        # box type `pos_bbox_targets` to tensor.\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n            else:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n                pos_bbox_targets = get_box_tensor(pos_bbox_targets)\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds, sampling_result)\n\n    def get_targets(self,\n                    anchor_list: List[List[Tensor]],\n                    valid_flag_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs: bool = True,\n                    return_sampling_results: bool = False) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. Defaults to True.\n            return_sampling_results (bool): Whether to return the sampling\n                results. 
Defaults to False.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - avg_factor (int): Average factor that is used to average\n                  the loss. When using sampling method, avg_factor is usually\n                  the sum of positive and negative priors. When using\n                  `PseudoSampler`, `avg_factor` is usually equal to the number\n                  of positive priors.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors to a single tensor\n        concat_anchor_list = []\n        concat_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            concat_anchor_list.append(cat_boxes(anchor_list[i]))\n            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        results = multi_apply(\n            self._get_targets_single,\n            concat_anchor_list,\n            concat_valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore,\n            unmap_outputs=unmap_outputs)\n        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,\n         pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]\n        rest_results = list(results[7:])  # user-added return values\n        # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n        # When using sampling method, avg_factor is usually the sum of\n        # positive and negative priors. When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # update `_raw_positive_infos`, which will be used when calling\n        # `get_positive_infos`.\n        self._raw_positive_infos.update(sampling_results=sampling_results_list)\n        # split targets to a list w.r.t. 
multiple levels\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        res = (labels_list, label_weights_list, bbox_targets_list,\n               bbox_weights_list, avg_factor)\n        if return_sampling_results:\n            res = res + (sampling_results_list, )\n        for i, r in enumerate(rest_results):  # user-added return values\n            rest_results[i] = images_to_levels(r, num_level_anchors)\n\n        return res + tuple(rest_results)\n\n    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            anchors: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            bbox_weights: Tensor, avg_factor: int) -> tuple:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            bbox_weights (Tensor): BBox regression loss weights of each anchor\n                with shape (N, num_total_anchors, 4).\n            avg_factor (int): Average factor that is used to average the loss.\n\n        Returns:\n            tuple: loss components.\n        \"\"\"\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=avg_factor)\n        # regression loss\n        target_dim = bbox_targets.size(-1)\n        bbox_targets = bbox_targets.reshape(-1, target_dim)\n        bbox_weights = bbox_weights.reshape(-1, target_dim)\n        bbox_pred = bbox_pred.permute(0, 2, 3,\n                                      1).reshape(-1,\n                                                 self.bbox_coder.encode_size)\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            anchors = anchors.reshape(-1, anchors.size(-1))\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n            bbox_pred = get_box_tensor(bbox_pred)\n        loss_bbox = self.loss_bbox(\n            bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor)\n        return loss_cls, loss_bbox\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor) = cls_reg_targets\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(cat_boxes(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            avg_factor=avg_factor)\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n"
  },
  {
    "path": "mmdet/models/dense_heads/atss_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..utils import images_to_levels, multi_apply, unmap\nfrom .anchor_head import AnchorHead\n\n\n@MODELS.register_module()\nclass ATSSHead(AnchorHead):\n    \"\"\"Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.\n\n    ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n    and assign label by Adaptive Training Sample Selection instead max-iou.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        pred_kernel_size (int): Kernel size of ``nn.Conv2d``\n        stacked_convs (int): Number of stacking convs of the head.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. Defaults to ``dict(type='GN', num_groups=32,\n            requires_grad=True)``.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Defaults to False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.\n            Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,\n            loss_weight=1.0)``.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 pred_kernel_size: int = 3,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 reg_decoded_bbox: bool = True,\n                 loss_centerness: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='atss_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        self.pred_kernel_size = pred_kernel_size\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            reg_decoded_bbox=reg_decoded_bbox,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        
self.sampling = False\n        self.loss_centerness = MODELS.build(loss_centerness)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        pred_pad_size = self.pred_kernel_size // 2\n        self.atss_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_anchors * self.cls_out_channels,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.atss_reg = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * 4,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.atss_centerness = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * 1,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales)\n\n    def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level\n                    the channels number is num_anchors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale\n                    level, the channels number is num_anchors * 4.\n                centerness (Tensor): Centerness for a single scale level, the\n                    channel number is (N, num_anchors * 1, H, W).\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in 
self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.atss_cls(cls_feat)\n        # following ATSS, exp is not applied to bbox_pred\n        bbox_pred = scale(self.atss_reg(reg_feat)).float()\n        centerness = self.atss_centerness(reg_feat)\n        return cls_score, bbox_pred, centerness\n\n    def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n                            bbox_pred: Tensor, centerness: Tensor,\n                            labels: Tensor, label_weights: Tensor,\n                            bbox_targets: Tensor, avg_factor: float) -> tuple:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                with shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            labels (Tensor): Labels of each anchor with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors).\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                with shape (N, num_total_anchors, 4).\n            avg_factor (float): Average factor that is used to average\n                the loss. When using a sampling method, avg_factor is usually\n                the sum of positive and negative priors. When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            tuple[Tensor]: A tuple containing loss_cls, loss_bbox,\n            loss_centerness and the sum of the positive centerness targets.\n        \"\"\"\n\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # classification loss\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=avg_factor)\n\n        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_centerness = centerness[pos_inds]\n\n            centerness_targets = self.centerness_target(\n                pos_anchors, pos_bbox_targets)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_pred)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_bbox_targets,\n                weight=centerness_targets,\n                avg_factor=1.0)\n\n            # centerness loss\n            loss_centerness = self.loss_centerness(\n                pos_centerness, centerness_targets, avg_factor=avg_factor)\n\n        else:\n  
          loss_bbox = bbox_pred.sum() * 0\n            loss_centerness = centerness.sum() * 0\n            centerness_targets = bbox_targets.new_tensor(0.)\n\n        return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            centernesses: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            centernesses (list[Tensor]): Centerness for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        losses_cls, losses_bbox, loss_centerness, \\\n            bbox_avg_factor = multi_apply(\n                self.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                centernesses,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                avg_factor=avg_factor)\n\n        bbox_avg_factor = sum(bbox_avg_factor)\n        bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_centerness=loss_centerness)\n\n    def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:\n        \"\"\"Calculate the centerness between anchors and gts.\n\n        Only calculate pos centerness targets, otherwise there may be nan.\n\n        Args:\n            anchors (Tensor): Anchors 
with shape (N, 4), \"xyxy\" format.\n            gts (Tensor): Ground truth bboxes with shape (N, 4), \"xyxy\" format.\n\n        Returns:\n            Tensor: Centerness between anchors and gts.\n        \"\"\"\n        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n        l_ = anchors_cx - gts[:, 0]\n        t_ = anchors_cy - gts[:, 1]\n        r_ = gts[:, 2] - anchors_cx\n        b_ = gts[:, 3] - anchors_cy\n\n        left_right = torch.stack([l_, r_], dim=1)\n        top_bottom = torch.stack([t_, b_], dim=1)\n        centerness = torch.sqrt(\n            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n        assert not torch.isnan(centerness).any()\n        return centerness\n\n    def get_targets(self,\n                    anchor_list: List[List[Tensor]],\n                    valid_flag_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs: bool = True) -> tuple:\n        \"\"\"Get targets for ATSS head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. Besides\n        returning the targets as the parent method does, it also returns the\n        anchors as the first element of the returned tuple.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             anchor_list,\n             valid_flag_list,\n             num_level_anchors_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs)\n        # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n        # When using sampling method, avg_factor is usually the sum of\n        # positive and negative priors. When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, avg_factor)\n\n    def _get_targets_single(self,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            num_level_anchors: List[int],\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            num_level_anchors (List[int]): Number of anchors of each scale\n                level.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                labels (Tensor): Labels of all anchors in the image with shape\n                    (N,).\n                label_weights (Tensor): Label weights of all anchor in the\n                    image with shape (N,).\n                bbox_targets (Tensor): BBox targets of all anchors in the\n                    image with shape (N, 4).\n                bbox_weights (Tensor): BBox weights of all anchors in the\n                    image with shape (N, 4)\n                pos_inds (Tensor): Indices of positive anchor with shape\n                    (num_pos,).\n                neg_inds (Tensor): Indices of negative anchor with shape\n                    (num_neg,).\n                sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. 
Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            num_level_anchors, inside_flags)\n        pred_instances = InstanceData(priors=anchors)\n        assign_result = self.assigner.assign(pred_instances,\n                                             num_level_anchors_inside,\n                                             gt_instances, gt_instances_ignore)\n\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if self.reg_decoded_bbox:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            else:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds, sampling_result)\n\n    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n        \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
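  {
    "path": "examples/atss_centerness_target_sketch.py",
    "content": "# NOTE: illustrative sketch only, not part of the upstream mmdet package.\n# It re-implements, on toy tensors, the centerness-target formula used by\n# ``ATSSHead.centerness_target`` in atss_head.py above, so the weight that\n# scales the positive regression loss is easy to inspect. The file name and\n# the standalone helper below are assumptions made for this example.\nimport torch\nfrom torch import Tensor\n\n\ndef centerness_target_sketch(anchors: Tensor, gts: Tensor) -> Tensor:\n    # anchors and gts are both (N, 4) in 'xyxy' format, matched one-to-one\n    anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n    anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n    l_ = anchors_cx - gts[:, 0]\n    t_ = anchors_cy - gts[:, 1]\n    r_ = gts[:, 2] - anchors_cx\n    b_ = gts[:, 3] - anchors_cy\n    left_right = torch.stack([l_, r_], dim=1)\n    top_bottom = torch.stack([t_, b_], dim=1)\n    # sqrt of the min/max ratios of the horizontal and vertical distances\n    # from the anchor center to the gt box, as in ATSSHead.centerness_target\n    return torch.sqrt(\n        (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n        (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n\n\nif __name__ == '__main__':\n    # one anchor centred on its gt box and one shifted towards the right edge\n    anchors = torch.tensor([[4., 4., 12., 12.], [10., 4., 18., 12.]])\n    gts = torch.tensor([[0., 0., 16., 16.], [0., 0., 16., 16.]])\n    # expected: 1.0 for the centred anchor, about 0.38 for the shifted one\n    print(centerness_target_sketch(anchors, gts))\n"
  },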
  {
    "path": "mmdet/models/dense_heads/autoassign_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Scale\nfrom mmengine.model import bias_init_with_prob, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import InstanceList, OptInstanceList, reduce_mean\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..utils import levels_to_images, multi_apply\nfrom .fcos_head import FCOSHead\n\nEPS = 1e-12\n\n\nclass CenterPrior(nn.Module):\n    \"\"\"Center Weighting module to adjust the category-specific prior\n    distributions.\n\n    Args:\n        force_topk (bool): When no point falls into gt_bbox, forcibly\n            select the k points closest to the center to calculate\n            the center prior. Defaults to False.\n        topk (int): The number of points used to calculate the\n            center prior when no point falls in gt_bbox. Only work when\n            force_topk if True. Defaults to 9.\n        num_classes (int): The class number of dataset. Defaults to 80.\n        strides (Sequence[int]): The stride of each input feature map.\n            Defaults to (8, 16, 32, 64, 128).\n    \"\"\"\n\n    def __init__(\n        self,\n        force_topk: bool = False,\n        topk: int = 9,\n        num_classes: int = 80,\n        strides: Sequence[int] = (8, 16, 32, 64, 128)\n    ) -> None:\n        super().__init__()\n        self.mean = nn.Parameter(torch.zeros(num_classes, 2))\n        self.sigma = nn.Parameter(torch.ones(num_classes, 2))\n        self.strides = strides\n        self.force_topk = force_topk\n        self.topk = topk\n\n    def forward(self, anchor_points_list: List[Tensor],\n                gt_instances: InstanceData,\n                inside_gt_bbox_mask: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Get the center prior of each point on the feature map for each\n        instance.\n\n        Args:\n            anchor_points_list (list[Tensor]): list of coordinate\n                of points on feature map. Each with shape\n                (num_points, 2).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            inside_gt_bbox_mask (Tensor): Tensor of bool type,\n                with shape of (num_points, num_gt), each\n                value is used to mark whether this point falls\n                within a certain gt.\n\n        Returns:\n            tuple[Tensor, Tensor]:\n\n            - center_prior_weights(Tensor): Float tensor with shape  of \\\n            (num_points, num_gt). 
Each value represents the center \\\n            weighting coefficient.\n            - inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape \\\n            of (num_points, num_gt), each value is used to mark whether this \\\n            point falls within a certain gt or is the topk nearest points for \\\n            a specific gt_bbox.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        labels = gt_instances.labels\n\n        inside_gt_bbox_mask = inside_gt_bbox_mask.clone()\n        num_gts = len(labels)\n        num_points = sum([len(item) for item in anchor_points_list])\n        if num_gts == 0:\n            return gt_bboxes.new_zeros(num_points,\n                                       num_gts), inside_gt_bbox_mask\n        center_prior_list = []\n        for slvl_points, stride in zip(anchor_points_list, self.strides):\n            # slvl_points: points from single level in FPN, has shape (h*w, 2)\n            # single_level_points has shape (h*w, num_gt, 2)\n            single_level_points = slvl_points[:, None, :].expand(\n                (slvl_points.size(0), len(gt_bboxes), 2))\n            gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2)\n            gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2)\n            gt_center = torch.stack((gt_center_x, gt_center_y), dim=1)\n            gt_center = gt_center[None]\n            # instance_center has shape (1, num_gt, 2)\n            instance_center = self.mean[labels][None]\n            # instance_sigma has shape (1, num_gt, 2)\n            instance_sigma = self.sigma[labels][None]\n            # distance has shape (num_points, num_gt, 2)\n            distance = (((single_level_points - gt_center) / float(stride) -\n                         instance_center)**2)\n            center_prior = torch.exp(-distance /\n                                     (2 * instance_sigma**2)).prod(dim=-1)\n            center_prior_list.append(center_prior)\n        center_prior_weights = torch.cat(center_prior_list, dim=0)\n\n        if self.force_topk:\n            gt_inds_no_points_inside = torch.nonzero(\n                inside_gt_bbox_mask.sum(0) == 0).reshape(-1)\n            if gt_inds_no_points_inside.numel():\n                topk_center_index = \\\n                    center_prior_weights[:, gt_inds_no_points_inside].topk(\n                                                             self.topk,\n                                                             dim=0)[1]\n                temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside]\n                inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \\\n                    torch.scatter(temp_mask,\n                                  dim=0,\n                                  index=topk_center_index,\n                                  src=torch.ones_like(\n                                    topk_center_index,\n                                    dtype=torch.bool))\n\n        center_prior_weights[~inside_gt_bbox_mask] = 0\n        return center_prior_weights, inside_gt_bbox_mask\n\n\n@MODELS.register_module()\nclass AutoAssignHead(FCOSHead):\n    \"\"\"AutoAssignHead head used in AutoAssign.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2007.03496>`_ .\n\n    Args:\n        force_topk (bool): Used in center prior initialization to\n            handle extremely small gt. Default is False.\n        topk (int): The number of points used to calculate the\n            center prior when no point falls in gt_bbox. 
Only work when\n            force_topk if True. Defaults to 9.\n        pos_loss_weight (float): The loss weight of positive loss\n            and with default value 0.25.\n        neg_loss_weight (float): The loss weight of negative loss\n            and with default value 0.75.\n        center_loss_weight (float): The loss weight of center prior\n            loss and with default value 0.75.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 force_topk: bool = False,\n                 topk: int = 9,\n                 pos_loss_weight: float = 0.25,\n                 neg_loss_weight: float = 0.75,\n                 center_loss_weight: float = 0.75,\n                 **kwargs) -> None:\n        super().__init__(*args, conv_bias=True, **kwargs)\n        self.center_prior = CenterPrior(\n            force_topk=force_topk,\n            topk=topk,\n            num_classes=self.num_classes,\n            strides=self.strides)\n        self.pos_loss_weight = pos_loss_weight\n        self.neg_loss_weight = neg_loss_weight\n        self.center_loss_weight = center_loss_weight\n        self.prior_generator = MlvlPointGenerator(self.strides, offset=0)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\n\n        In particular, we have special initialization for classified conv's and\n        regression conv's bias\n        \"\"\"\n\n        super(AutoAssignHead, self).init_weights()\n        bias_cls = bias_init_with_prob(0.02)\n        normal_init(self.conv_cls, std=0.01, bias=bias_cls)\n        normal_init(self.conv_reg, std=0.01, bias=4.0)\n\n    def forward_single(self, x: Tensor, scale: Scale,\n                       stride: int) -> Tuple[Tensor, Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps, only\n                used to normalize the bbox prediction when self.norm_on_bbox\n                is True.\n\n        Returns:\n            tuple[Tensor, Tensor, Tensor]: scores for each class, bbox\n            predictions and centerness predictions of input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, cls_feat, reg_feat = super(\n            FCOSHead, self).forward_single(x)\n        centerness = self.conv_centerness(reg_feat)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        # bbox_pred needed for gradient computation has been modified\n        # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n        # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n        bbox_pred = bbox_pred.clamp(min=0)\n        bbox_pred *= stride\n        return cls_score, bbox_pred, centerness\n\n    def get_pos_loss_single(self, cls_score: Tensor, objectness: Tensor,\n                            reg_loss: Tensor, gt_instances: InstanceData,\n                            center_prior_weights: Tensor) -> Tuple[Tensor]:\n        \"\"\"Calculate the positive loss of all points in gt_bboxes.\n\n        Args:\n            cls_score (Tensor): All category scores for each point on\n                the feature map. 
The shape is (num_points, num_class).\n            objectness (Tensor): Foreground probability of all points,\n                has shape (num_points, 1).\n            reg_loss (Tensor): The regression loss of each gt_bbox and each\n                prediction box, has shape of (num_points, num_gt).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            center_prior_weights (Tensor): Float tensor with shape\n                of (num_points, num_gt). Each value represents\n                the center weighting coefficient.\n\n        Returns:\n            tuple[Tensor]:\n\n            - pos_loss (Tensor): The positive loss of all points in the \\\n            gt_bboxes.\n        \"\"\"\n        gt_labels = gt_instances.labels\n        # p_loc: localization confidence\n        p_loc = torch.exp(-reg_loss)\n        # p_cls: classification confidence\n        p_cls = (cls_score * objectness)[:, gt_labels]\n        # p_pos: joint confidence indicator\n        p_pos = p_cls * p_loc\n\n        # 3 is a hyper-parameter to control the contributions of high and\n        # low confidence locations towards positive losses.\n        confidence_weight = torch.exp(p_pos * 3)\n        p_pos_weight = (confidence_weight * center_prior_weights) / (\n            (confidence_weight * center_prior_weights).sum(\n                0, keepdim=True)).clamp(min=EPS)\n        reweighted_p_pos = (p_pos * p_pos_weight).sum(0)\n        pos_loss = F.binary_cross_entropy(\n            reweighted_p_pos,\n            torch.ones_like(reweighted_p_pos),\n            reduction='none')\n        pos_loss = pos_loss.sum() * self.pos_loss_weight\n        return pos_loss,\n\n    def get_neg_loss_single(self, cls_score: Tensor, objectness: Tensor,\n                            gt_instances: InstanceData, ious: Tensor,\n                            inside_gt_bbox_mask: Tensor) -> Tuple[Tensor]:\n        \"\"\"Calculate the negative loss of all points in feature map.\n\n        Args:\n            cls_score (Tensor): All category scores for each point on\n                the feature map. The shape is (num_points, num_class).\n            objectness (Tensor): Foreground probability of all points\n                and is shape of (num_points, 1).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It should include ``bboxes`` and ``labels``\n                attributes.\n            ious (Tensor): Float tensor with shape of (num_points, num_gt).\n                Each value represents the IoU between pred_bbox and gt_bboxes.\n            inside_gt_bbox_mask (Tensor): Tensor of bool type,\n                with shape of (num_points, num_gt), each\n                value is used to mark whether this point falls\n                within a certain gt.\n\n        Returns:\n            tuple[Tensor]:\n\n            - neg_loss (Tensor): The negative loss of all points in the \\\n            feature map.\n        \"\"\"\n        gt_labels = gt_instances.labels\n        num_gts = len(gt_labels)\n        joint_conf = (cls_score * objectness)\n        p_neg_weight = torch.ones_like(joint_conf)\n        if num_gts > 0:\n            # the order of dimensions would affect the value of\n            # p_neg_weight, so we strictly follow the original\n            # implementation.\n            inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0)\n            ious = ious.permute(1, 0)\n\n            foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True)\n            temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS))\n\n            def normalize(x):\n                return (x - x.min() + EPS) / (x.max() - x.min() + EPS)\n\n            for instance_idx in range(num_gts):\n                idxs = foreground_idxs[0] == instance_idx\n                if idxs.any():\n                    temp_weight[idxs] = normalize(temp_weight[idxs])\n\n            p_neg_weight[foreground_idxs[1],\n                         gt_labels[foreground_idxs[0]]] = 1 - temp_weight\n\n        logits = (joint_conf * p_neg_weight)\n        neg_loss = (\n            logits**2 * F.binary_cross_entropy(\n                logits, torch.zeros_like(logits), reduction='none'))\n        neg_loss = neg_loss.sum() * self.neg_loss_weight\n        return neg_loss,\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        objectnesses: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            objectnesses (list[Tensor]): Objectness for each scale level, each\n                is a 4D-tensor, the channel number is num_points * 1.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        assert len(cls_scores) == len(bbox_preds) == len(objectnesses)\n        all_num_gt = sum([len(item) for item in batch_gt_instances])\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets(\n            all_level_points, batch_gt_instances)\n\n        center_prior_weight_list = []\n        temp_inside_gt_bbox_mask_list = []\n        for gt_instances, inside_gt_bbox_mask in zip(batch_gt_instances,\n                                                     inside_gt_bbox_mask_list):\n            center_prior_weight, inside_gt_bbox_mask = \\\n                self.center_prior(all_level_points, gt_instances,\n                                  inside_gt_bbox_mask)\n            center_prior_weight_list.append(center_prior_weight)\n            temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask)\n        inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list\n        mlvl_points = torch.cat(all_level_points, dim=0)\n        bbox_preds = levels_to_images(bbox_preds)\n        cls_scores = levels_to_images(cls_scores)\n        objectnesses = levels_to_images(objectnesses)\n\n        reg_loss_list = []\n        ious_list = []\n        num_points = len(mlvl_points)\n\n        for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip(\n                bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list):\n            temp_num_gt = encoded_targets.size(1)\n            expand_mlvl_points = mlvl_points[:, None, :].expand(\n                num_points, temp_num_gt, 2).reshape(-1, 2)\n            encoded_targets = encoded_targets.reshape(-1, 4)\n            expand_bbox_pred = bbox_pred[:, None, :].expand(\n                num_points, temp_num_gt, 4).reshape(-1, 4)\n            decoded_bbox_preds = self.bbox_coder.decode(\n                expand_mlvl_points, expand_bbox_pred)\n            decoded_target_preds = self.bbox_coder.decode(\n                expand_mlvl_points, encoded_targets)\n            with torch.no_grad():\n                ious = bbox_overlaps(\n                    decoded_bbox_preds, decoded_target_preds, is_aligned=True)\n                ious = ious.reshape(num_points, temp_num_gt)\n                if temp_num_gt:\n                    ious = ious.max(\n                        dim=-1, keepdim=True).values.repeat(1, temp_num_gt)\n                else:\n                    ious = ious.new_zeros(num_points, temp_num_gt)\n                ious[~inside_gt_bbox_mask] = 0\n                ious_list.append(ious)\n            loss_bbox = self.loss_bbox(\n                decoded_bbox_preds,\n                decoded_target_preds,\n                weight=None,\n                reduction_override='none')\n            reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt))\n\n        cls_scores = [item.sigmoid() for item in cls_scores]\n        objectnesses = [item.sigmoid() for item in objectnesses]\n        pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores,\n                                     objectnesses, reg_loss_list,\n                                     
batch_gt_instances,\n                                     center_prior_weight_list)\n        pos_avg_factor = reduce_mean(\n            bbox_pred.new_tensor(all_num_gt)).clamp_(min=1)\n        pos_loss = sum(pos_loss_list) / pos_avg_factor\n\n        neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores,\n                                     objectnesses, batch_gt_instances,\n                                     ious_list, inside_gt_bbox_mask_list)\n        neg_avg_factor = sum(item.data.sum()\n                             for item in center_prior_weight_list)\n        neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1)\n        neg_loss = sum(neg_loss_list) / neg_avg_factor\n\n        center_loss = []\n        for i in range(len(batch_img_metas)):\n\n            if inside_gt_bbox_mask_list[i].any():\n                center_loss.append(\n                    len(batch_gt_instances[i]) /\n                    center_prior_weight_list[i].sum().clamp_(min=EPS))\n            # when width or height of gt_bbox is smaller than stride of p3\n            else:\n                center_loss.append(center_prior_weight_list[i].sum() * 0)\n\n        center_loss = torch.stack(center_loss).mean() * self.center_loss_weight\n\n        # avoid dead lock in DDP\n        if all_num_gt == 0:\n            pos_loss = bbox_preds[0].sum() * 0\n            dummy_center_prior_loss = self.center_prior.mean.sum(\n            ) * 0 + self.center_prior.sigma.sum() * 0\n            center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss\n\n        loss = dict(\n            loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss)\n\n        return loss\n\n    def get_targets(\n            self, points: List[Tensor], batch_gt_instances: InstanceList\n    ) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Compute regression targets and each point inside or outside gt_bbox\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of all fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple(list[Tensor], list[Tensor]):\n\n            - inside_gt_bbox_mask_list (list[Tensor]): Each Tensor is with \\\n            bool type and shape of (num_points, num_gt), each value is used \\\n            to mark whether this point falls within a certain gt.\n            - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \\\n            level. Each tensor has shape (num_points, num_gt, 4).\n        \"\"\"\n\n        concat_points = torch.cat(points, dim=0)\n        # the number of points per img, per lvl\n        inside_gt_bbox_mask_list, bbox_targets_list = multi_apply(\n            self._get_targets_single, batch_gt_instances, points=concat_points)\n        return inside_gt_bbox_mask_list, bbox_targets_list\n\n    def _get_targets_single(self, gt_instances: InstanceData,\n                            points: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute regression targets and each point inside or outside gt_bbox\n        for a single image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It should includes ``bboxes`` and ``labels``\n                attributes.\n            points (Tensor): Points of all fpn level, has shape\n                (num_points, 2).\n\n        Returns:\n            tuple[Tensor, Tensor]: Containing the following Tensors:\n\n            - inside_gt_bbox_mask (Tensor): Bool tensor with shape \\\n            (num_points, num_gt), each value is used to mark whether this \\\n            point falls within a certain gt.\n            - bbox_targets (Tensor): BBox targets of each points with each \\\n            gt_bboxes, has shape (num_points, num_gt, 4).\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        num_points = points.size(0)\n        num_gts = gt_bboxes.size(0)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        xs, ys = points[:, 0], points[:, 1]\n        xs = xs[:, None]\n        ys = ys[:, None]\n        left = xs - gt_bboxes[..., 0]\n        right = gt_bboxes[..., 2] - xs\n        top = ys - gt_bboxes[..., 1]\n        bottom = gt_bboxes[..., 3] - ys\n        bbox_targets = torch.stack((left, top, right, bottom), -1)\n        if num_gts:\n            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n        else:\n            inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts),\n                                                         dtype=torch.bool)\n\n        return inside_gt_bbox_mask, bbox_targets\n"
  },
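  {
    "path": "examples/autoassign_center_prior_sketch.py",
    "content": "# NOTE: illustrative sketch only, not part of the upstream mmdet package.\n# It reproduces, for a single FPN level and a single gt box, the Gaussian\n# center-prior weighting computed in ``CenterPrior.forward`` in\n# autoassign_head.py above, with the learnable per-class ``mean`` and\n# ``sigma`` parameters replaced by constants. The file name and helper\n# below are assumptions made for this example.\nimport torch\nfrom torch import Tensor\n\n\ndef center_prior_single_level(points: Tensor, gt_bbox: Tensor, stride: int,\n                              mean: Tensor, sigma: Tensor) -> Tensor:\n    # points: (num_points, 2) xy coordinates; gt_bbox: (4,) in 'xyxy' format\n    gt_center = torch.stack(((gt_bbox[0] + gt_bbox[2]) / 2,\n                             (gt_bbox[1] + gt_bbox[3]) / 2))\n    # per-axis Gaussian in units of the feature-map stride, multiplied over\n    # the x and y axes, matching the form used in CenterPrior.forward\n    distance = ((points - gt_center) / float(stride) - mean)**2\n    return torch.exp(-distance / (2 * sigma**2)).prod(dim=-1)\n\n\nif __name__ == '__main__':\n    stride = 8\n    coords = torch.arange(0, 32, stride).float()\n    points = torch.cartesian_prod(coords, coords)  # (16, 2) grid of points\n    gt_bbox = torch.tensor([0., 0., 24., 24.])\n    # zero mean / unit sigma corresponds to a freshly initialised CenterPrior\n    weights = center_prior_single_level(\n        points, gt_bbox, stride, mean=torch.zeros(2), sigma=torch.ones(2))\n    # points closest to the gt center (12, 12) receive the largest weights\n    print(weights.reshape(4, 4))\n"
  },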
  {
    "path": "mmdet/models/dense_heads/base_dense_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom abc import ABCMeta, abstractmethod\nfrom inspect import signature\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom mmcv.ops import batched_nms\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, constant_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import (cat_boxes, get_box_tensor, get_box_wh,\n                                   scale_boxes)\nfrom mmdet.utils import InstanceList, OptMultiConfig\nfrom ..test_time_augs import merge_aug_results\nfrom ..utils import (filter_scores_and_topk, select_single_mlvl,\n                     unpack_gt_instances)\n\n\nclass BaseDenseHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for DenseHeads.\n\n    1. The ``init_weights`` method is used to initialize densehead's\n    model parameters. After detector initialization, ``init_weights``\n    is triggered when ``detector.init_weights()`` is called externally.\n\n    2. The ``loss`` method is used to calculate the loss of densehead,\n    which includes two steps: (1) the densehead model performs forward\n    propagation to obtain the feature maps (2) The ``loss_by_feat`` method\n    is called based on the feature maps to calculate the loss.\n\n    .. code:: text\n\n    loss(): forward() -> loss_by_feat()\n\n    3. The ``predict`` method is used to predict detection results,\n    which includes two steps: (1) the densehead model performs forward\n    propagation to obtain the feature maps (2) The ``predict_by_feat`` method\n    is called based on the feature maps to predict detection results including\n    post-processing.\n\n    .. code:: text\n\n    predict(): forward() -> predict_by_feat()\n\n    4. The ``loss_and_predict`` method is used to return loss and detection\n    results at the same time. It will call densehead's ``forward``,\n    ``loss_by_feat`` and ``predict_by_feat`` methods in order.  If one-stage is\n    used as RPN, the densehead needs to return both losses and predictions.\n    This predictions is used as the proposal of roihead.\n\n    .. 
code:: text\n\n    loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat()\n    \"\"\"\n\n    def __init__(self, init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        # `_raw_positive_infos` will be used in `get_positive_infos`, which\n        # retrieves the positive information.\n        self._raw_positive_infos = dict()\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the weights.\"\"\"\n        super().init_weights()\n        # avoid init_cfg overwriting the initialization of `conv_offset`\n        for m in self.modules():\n            # DeformConv2dPack, ModulatedDeformConv2dPack\n            if hasattr(m, 'conv_offset'):\n                constant_init(m.conv_offset, 0)\n\n    def get_positive_infos(self) -> InstanceList:\n        \"\"\"Get positive information from sampling results.\n\n        Returns:\n            list[:obj:`InstanceData`]: Positive information of each image,\n            usually including positive bboxes, positive labels, positive\n            priors, etc.\n        \"\"\"\n        if len(self._raw_positive_infos) == 0:\n            return None\n\n        sampling_results = self._raw_positive_infos.get(\n            'sampling_results', None)\n        assert sampling_results is not None\n        positive_infos = []\n        for sampling_result in sampling_results:\n            pos_info = InstanceData()\n            pos_info.bboxes = sampling_result.pos_gt_bboxes\n            pos_info.labels = sampling_result.pos_gt_labels\n            pos_info.priors = sampling_result.pos_priors\n            pos_info.pos_assigned_gt_inds = \\\n                sampling_result.pos_assigned_gt_inds\n            pos_info.pos_inds = sampling_result.pos_inds\n            positive_infos.append(pos_info)\n        return positive_infos\n\n    def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        outs = self(x)\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas,\n                              batch_gt_instances_ignore)\n        losses = self.loss_by_feat(*loss_inputs)\n        return losses\n\n    @abstractmethod\n    def loss_by_feat(self, **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\"\"\"\n        pass\n\n    def loss_and_predict(\n        self,\n        x: Tuple[Tensor],\n        batch_data_samples: SampleList,\n        proposal_cfg: Optional[ConfigDict] = None\n    ) -> Tuple[dict, InstanceList]:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples.\n\n        Args:\n            x (tuple[Tensor]): Features from FPN.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n            proposal_cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n\n        Returns:\n            tuple: the return value is a tuple contains:\n\n                - losses: (dict[str, Tensor]): A dictionary of loss components.\n                - predictions (list[:obj:`InstanceData`]): Detection\n                  results of each image after the post process.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n\n        outs = self(x)\n\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas,\n                              batch_gt_instances_ignore)\n        losses = self.loss_by_feat(*loss_inputs)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, cfg=proposal_cfg)\n        return losses, predictions\n\n    def predict(self,\n                x: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        outs = self(x)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n        return predictions\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        score_factors: Optional[List[Tensor]] = None,\n                        batch_img_metas: Optional[List[dict]] = None,\n                        cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Note: When score_factors is not None, the cls_scores are\n        usually multiplied by it then obtain the real score used in NMS,\n        such as CenterNess in FCOS, IoU branch in ATSS.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            score_factors (list[Tensor], optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 1, H, W). Defaults to None.\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        if score_factors is None:\n            # e.g. Retina, FreeAnchor, Foveabox, etc.\n            with_score_factors = False\n        else:\n            # e.g. 
FCOS, PAA, ATSS, AutoAssign, etc.\n            with_score_factors = True\n            assert len(cls_scores) == len(score_factors)\n\n        num_levels = len(cls_scores)\n\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device)\n\n        result_list = []\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            cls_score_list = select_single_mlvl(\n                cls_scores, img_id, detach=True)\n            bbox_pred_list = select_single_mlvl(\n                bbox_preds, img_id, detach=True)\n            if with_score_factors:\n                score_factor_list = select_single_mlvl(\n                    score_factors, img_id, detach=True)\n            else:\n                score_factor_list = [None for _ in range(num_levels)]\n\n            results = self._predict_by_feat_single(\n                cls_score_list=cls_score_list,\n                bbox_pred_list=bbox_pred_list,\n                score_factor_list=score_factor_list,\n                mlvl_priors=mlvl_priors,\n                img_meta=img_meta,\n                cfg=cfg,\n                rescale=rescale,\n                with_nms=with_nms)\n            result_list.append(results)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). 
In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        if score_factor_list[0] is None:\n            # e.g. Retina, FreeAnchor, etc.\n            with_score_factors = False\n        else:\n            # e.g. FCOS, PAA, ATSS, etc.\n            with_score_factors = True\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_valid_priors = []\n        mlvl_scores = []\n        mlvl_labels = []\n        if with_score_factors:\n            mlvl_score_factors = []\n        else:\n            mlvl_score_factors = None\n        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, mlvl_priors)):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            dim = self.bbox_coder.encode_size\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)\n            if with_score_factors:\n                score_factor = score_factor.permute(1, 2,\n                                                    0).reshape(-1).sigmoid()\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                # remind that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            score_thr = cfg.get('score_thr', 0)\n\n            results = filter_scores_and_topk(\n                scores, score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            if with_score_factors:\n                score_factor = score_factor[keep_idxs]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_valid_priors.append(priors)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n            if with_score_factors:\n                mlvl_score_factors.append(score_factor)\n\n        bbox_pred = torch.cat(mlvl_bbox_preds)\n        priors = cat_boxes(mlvl_valid_priors)\n        bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n        if with_score_factors:\n            results.score_factors = torch.cat(mlvl_score_factors)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def _bbox_post_process(self,\n                           results: InstanceData,\n                           cfg: ConfigDict,\n                           rescale: bool = False,\n                           with_nms: bool = True,\n                           img_meta: Optional[dict] = None) -> InstanceData:\n        \"\"\"Bbox post-processing method.\n\n        The boxes are rescaled to the original image scale and the nms\n        operation is applied. Usually `with_nms` is False when the method\n        is used for aug test.\n\n        Args:\n            results (:obj:`InstanceData`): Detection instance results,\n                each item has shape (num_bboxes, ).\n            cfg (ConfigDict): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n            img_meta (dict, optional): Image meta info. 
Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            results.bboxes = scale_boxes(results.bboxes, scale_factor)\n\n        if hasattr(results, 'score_factors'):\n            # TODO: Add sqrt operation in order to be consistent with\n            #  the paper.\n            score_factors = results.pop('score_factors')\n            results.scores = results.scores * score_factors\n\n        # filter small size bboxes\n        if cfg.get('min_bbox_size', -1) >= 0:\n            w, h = get_box_wh(results.bboxes)\n            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n            if not valid_mask.all():\n                results = results[valid_mask]\n\n        # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg\n        if with_nms and results.bboxes.numel() > 0:\n            bboxes = get_box_tensor(results.bboxes)\n            det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,\n                                                results.labels, cfg.nms)\n            results = results[keep_idxs]\n            # some nms would reweight the score, such as softnms\n            results.scores = det_bboxes[:, -1]\n            results = results[:cfg.max_per_img]\n\n        return results\n\n    def aug_test(self,\n                 aug_batch_feats,\n                 aug_batch_img_metas,\n                 rescale=False,\n                 with_ori_nms=False,\n                 **kwargs):\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            aug_batch_feats (list[tuple[Tensor]]): The outer list\n                indicates test-time augmentations and the inner tuple\n                indicates the multi-level feats from the FPN, each Tensor\n                should have a shape (B, C, H, W).\n            aug_batch_img_metas (list[list[dict]]): Meta information\n                of images under the different test-time augs\n                (multiscale, flip, etc.). The outer list indicates\n                test-time augmentations and the inner list indicates\n                the images in a single batch.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n            with_ori_nms (bool): Whether to execute the nms in the original\n                head. Defaults to False. It will be `True` when the head is\n                adopted as `rpn_head`.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of the\n            input images. 
Each item usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance,)\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances,).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        # TODO: remove this for detr and deformdetr\n        sig_of_get_results = signature(self.get_results)\n        get_results_args = [\n            p.name for p in sig_of_get_results.parameters.values()\n        ]\n        get_results_single_sig = signature(self._get_results_single)\n        get_results_single_sig_args = [\n            p.name for p in get_results_single_sig.parameters.values()\n        ]\n        assert ('with_nms' in get_results_args) and \\\n               ('with_nms' in get_results_single_sig_args), \\\n               f'{self.__class__.__name__} ' \\\n               'does not support test-time augmentation'\n\n        num_imgs = len(aug_batch_img_metas[0])\n        aug_batch_results = []\n        for x, img_metas in zip(aug_batch_feats, aug_batch_img_metas):\n            outs = self.forward(x)\n            batch_instance_results = self.get_results(\n                *outs,\n                img_metas=img_metas,\n                cfg=self.test_cfg,\n                rescale=False,\n                with_nms=with_ori_nms,\n                **kwargs)\n            aug_batch_results.append(batch_instance_results)\n\n        # after merging, bboxes will be rescaled to the original image\n        batch_results = merge_aug_results(aug_batch_results,\n                                          aug_batch_img_metas)\n\n        final_results = []\n        for img_id in range(num_imgs):\n            results = batch_results[img_id]\n            det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores,\n                                                results.labels,\n                                                self.test_cfg.nms)\n            results = results[keep_idxs]\n            # some nms operation may reweight the score such as softnms\n            results.scores = det_bboxes[:, -1]\n            results = results[:self.test_cfg.max_per_img]\n            if rescale:\n                # all results have been mapped to the original scale\n                # in `merge_aug_results`, so just pass\n                pass\n            else:\n                # map to the first aug image scale\n                scale_factor = results.bboxes.new_tensor(\n                    aug_batch_img_metas[0][img_id]['scale_factor'])\n                results.bboxes = \\\n                    results.bboxes * scale_factor\n\n            final_results.append(results)\n\n        return final_results\n"
  },
  {
    "path": "mmdet/models/dense_heads/base_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List, Tuple, Union\n\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig\nfrom ..utils import unpack_gt_instances\n\n\nclass BaseMaskHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for mask heads used in One-Stage Instance Segmentation.\"\"\"\n\n    def __init__(self, init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n\n    @abstractmethod\n    def loss_by_feat(self, *args, **kwargs):\n        \"\"\"Calculate the loss based on the features extracted by the mask\n        head.\"\"\"\n        pass\n\n    @abstractmethod\n    def predict_by_feat(self, *args, **kwargs):\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\"\"\"\n        pass\n\n    def loss(self,\n             x: Union[List[Tensor], Tuple[Tensor]],\n             batch_data_samples: SampleList,\n             positive_infos: OptInstanceList = None,\n             **kwargs) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the mask head on\n        the features of the upstream network.\n\n        Args:\n            x (list[Tensor] | tuple[Tensor]): Features from FPN.\n                Each has a shape (B, C, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n            positive_infos (list[:obj:`InstanceData`], optional): Information\n                of positive samples. Used when the label assignment is\n                done outside the MaskHead, e.g., BboxHead in\n                YOLACT or CondInst, etc. When the label assignment is done in\n                MaskHead, it would be None, like SOLO or SOLOv2. 
All values\n                in it should have shape (num_positive_samples, *).\n\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        if positive_infos is None:\n            outs = self(x)\n        else:\n            outs = self(x, positive_infos)\n\n        assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \\\n                                        'even if only one item is returned'\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n        for gt_instances, img_metas in zip(batch_gt_instances,\n                                           batch_img_metas):\n            img_shape = img_metas['batch_input_shape']\n            gt_masks = gt_instances.masks.pad(img_shape)\n            gt_instances.masks = gt_masks\n\n        losses = self.loss_by_feat(\n            *outs,\n            batch_gt_instances=batch_gt_instances,\n            batch_img_metas=batch_img_metas,\n            positive_infos=positive_infos,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            **kwargs)\n        return losses\n\n    def predict(self,\n                x: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = False,\n                results_list: OptInstanceList = None,\n                **kwargs) -> InstanceList:\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n            results_list (list[obj:`InstanceData`], optional): Detection\n                results of each image after the post process. Only exist\n                if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.\n\n        Returns:\n            list[obj:`InstanceData`]: Instance segmentation\n            results of each image after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance,)\n                - labels (Tensor): Has a shape (num_instances,).\n                - masks (Tensor): Processed mask results, has a\n                  shape (num_instances, h, w).\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n        if results_list is None:\n            outs = self(x)\n        else:\n            outs = self(x, results_list)\n\n        results_list = self.predict_by_feat(\n            *outs,\n            batch_img_metas=batch_img_metas,\n            rescale=rescale,\n            results_list=results_list,\n            **kwargs)\n\n        return results_list\n"
  },
  {
    "path": "mmdet/models/dense_heads/boxinst_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine import MessageHub\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList\nfrom ..utils.misc import unfold_wo_center\nfrom .condinst_head import CondInstBboxHead, CondInstMaskHead\n\n\n@MODELS.register_module()\nclass BoxInstBboxHead(CondInstBboxHead):\n    \"\"\"BoxInst box head used in https://arxiv.org/abs/2012.02310.\"\"\"\n\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n\n\n@MODELS.register_module()\nclass BoxInstMaskHead(CondInstMaskHead):\n    \"\"\"BoxInst mask head used in https://arxiv.org/abs/2012.02310.\n\n    This head outputs the mask for BoxInst.\n\n    Args:\n        pairwise_size (int): The size of neighborhood for each pixel.\n            Defaults to 3.\n        pairwise_dilation (int): The dilation of neighborhood for each pixel.\n            Defaults to 2.\n        warmup_iters (int): Warmup iterations for pair-wise loss.\n            Defaults to 10000.\n    \"\"\"\n\n    def __init__(self,\n                 *arg,\n                 pairwise_size: int = 3,\n                 pairwise_dilation: int = 2,\n                 warmup_iters: int = 10000,\n                 **kwargs) -> None:\n        self.pairwise_size = pairwise_size\n        self.pairwise_dilation = pairwise_dilation\n        self.warmup_iters = warmup_iters\n        super().__init__(*arg, **kwargs)\n\n    def get_pairwise_affinity(self, mask_logits: Tensor) -> Tensor:\n        \"\"\"Compute the pairwise affinity for each pixel.\"\"\"\n        log_fg_prob = F.logsigmoid(mask_logits).unsqueeze(1)\n        log_bg_prob = F.logsigmoid(-mask_logits).unsqueeze(1)\n\n        log_fg_prob_unfold = unfold_wo_center(\n            log_fg_prob,\n            kernel_size=self.pairwise_size,\n            dilation=self.pairwise_dilation)\n        log_bg_prob_unfold = unfold_wo_center(\n            log_bg_prob,\n            kernel_size=self.pairwise_size,\n            dilation=self.pairwise_dilation)\n\n        # the probability of making the same prediction:\n        # p_i * p_j + (1 - p_i) * (1 - p_j)\n        # we compute the probability in log space\n        # to avoid numerical instability\n        log_same_fg_prob = log_fg_prob[:, :, None] + log_fg_prob_unfold\n        log_same_bg_prob = log_bg_prob[:, :, None] + log_bg_prob_unfold\n\n        # TODO: Figure out the difference between it and directly sum\n        max_ = torch.max(log_same_fg_prob, log_same_bg_prob)\n        log_same_prob = torch.log(\n            torch.exp(log_same_fg_prob - max_) +\n            torch.exp(log_same_bg_prob - max_)) + max_\n\n        return -log_same_prob[:, 0]\n\n    def loss_by_feat(self, mask_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], positive_infos: InstanceList,\n                     **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mask_preds (list[Tensor]): List of predicted masks, each has\n                shape (num_classes, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n            positive_infos (list[:obj:`InstanceData`]): Information of\n                positive samples of each image that are assigned in detection\n                head.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert positive_infos is not None, \\\n            'positive_infos should not be None in `BoxInstMaskHead`'\n        losses = dict()\n\n        loss_mask_project = 0.\n        loss_mask_pairwise = 0.\n        num_imgs = len(mask_preds)\n        total_pos = 0.\n        avg_factor = 0.\n\n        for idx in range(num_imgs):\n            (mask_pred, pos_mask_targets, pos_pairwise_masks, num_pos) = \\\n                self._get_targets_single(\n                mask_preds[idx], batch_gt_instances[idx],\n                positive_infos[idx])\n            # mask loss\n            total_pos += num_pos\n            if num_pos == 0 or pos_mask_targets is None:\n                loss_project = mask_pred.new_zeros(1).mean()\n                loss_pairwise = mask_pred.new_zeros(1).mean()\n                avg_factor += 0.\n            else:\n                # compute the project term\n                loss_project_x = self.loss_mask(\n                    mask_pred.max(dim=1, keepdim=True)[0],\n                    pos_mask_targets.max(dim=1, keepdim=True)[0],\n                    reduction_override='none').sum()\n                loss_project_y = self.loss_mask(\n                    mask_pred.max(dim=2, keepdim=True)[0],\n                    pos_mask_targets.max(dim=2, keepdim=True)[0],\n                    reduction_override='none').sum()\n                loss_project = loss_project_x + loss_project_y\n                # compute the pairwise term\n                pairwise_affinity = self.get_pairwise_affinity(mask_pred)\n                avg_factor += pos_pairwise_masks.sum().clamp(min=1.0)\n                loss_pairwise = (pairwise_affinity * pos_pairwise_masks).sum()\n\n            loss_mask_project += loss_project\n            loss_mask_pairwise += loss_pairwise\n\n        if total_pos == 0:\n            total_pos += 1  # avoid nan\n        if avg_factor == 0:\n            avg_factor += 1  # avoid nan\n        loss_mask_project = loss_mask_project / total_pos\n        loss_mask_pairwise = loss_mask_pairwise / avg_factor\n        message_hub = MessageHub.get_current_instance()\n        iter = message_hub.get_info('iter')\n        warmup_factor = min(iter / float(self.warmup_iters), 1.0)\n        loss_mask_pairwise *= warmup_factor\n\n        losses.update(\n            loss_mask_project=loss_mask_project,\n            loss_mask_pairwise=loss_mask_pairwise)\n        return losses\n\n    def _get_targets_single(self, mask_preds: Tensor,\n                            gt_instances: InstanceData,\n                            positive_info: InstanceData):\n        \"\"\"Compute targets for predictions of a single image.\n\n        Args:\n            mask_preds (Tensor): Predicted prototypes with shape\n                (num_classes, H, W).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should include ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            positive_info (:obj:`InstanceData`): Information of positive\n                samples that are assigned in detection head. 
It usually\n                contains following keys.\n\n                    - pos_assigned_gt_inds (Tensor): Assigned GT indexes of\n                      positive proposals, has shape (num_pos, )\n                    - pos_inds (Tensor): Positive index of image, has\n                      shape (num_pos, ).\n                    - param_pred (Tensor): Positive param predictions\n                      with shape (num_pos, num_params).\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n            - mask_preds (Tensor): Positive predicted mask with shape\n              (num_pos, mask_h, mask_w).\n            - pos_mask_targets (Tensor): Positive mask targets with shape\n              (num_pos, mask_h, mask_w).\n            - pos_pairwise_masks (Tensor): Positive pairwise masks with\n              shape: (num_pos, num_neighborhood, mask_h, mask_w).\n            - num_pos (int): Number of positive samples.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        device = gt_bboxes.device\n        # Note that gt_masks are generated by full box\n        # from BoxInstDataPreprocessor\n        gt_masks = gt_instances.masks.to_tensor(\n            dtype=torch.bool, device=device).float()\n        # Note that pairwise_masks are generated by image color similarity\n        # from BoxInstDataPreprocessor\n        pairwise_masks = gt_instances.pairwise_masks\n        pairwise_masks = pairwise_masks.to(device=device)\n\n        # process with mask targets\n        pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds')\n        scores = positive_info.get('scores')\n        centernesses = positive_info.get('centernesses')\n        num_pos = pos_assigned_gt_inds.size(0)\n\n        if gt_masks.size(0) == 0 or num_pos == 0:\n            return mask_preds, None, None, 0\n        # Since we're producing (near) full image masks,\n        # it'd take too much vram to backprop on every single mask.\n        # Thus we select only a subset.\n        if (self.max_masks_to_train != -1) and \\\n           (num_pos > self.max_masks_to_train):\n            perm = torch.randperm(num_pos)\n            select = perm[:self.max_masks_to_train]\n            mask_preds = mask_preds[select]\n            pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n            num_pos = self.max_masks_to_train\n        elif self.topk_masks_per_img != -1:\n            unique_gt_inds = pos_assigned_gt_inds.unique()\n            num_inst_per_gt = max(\n                int(self.topk_masks_per_img / len(unique_gt_inds)), 1)\n\n            keep_mask_preds = []\n            keep_pos_assigned_gt_inds = []\n            for gt_ind in unique_gt_inds:\n                per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind)\n                mask_preds_per_inst = mask_preds[per_inst_pos_inds]\n                gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds]\n                if sum(per_inst_pos_inds) > num_inst_per_gt:\n                    per_inst_scores = scores[per_inst_pos_inds].sigmoid().max(\n                        dim=1)[0]\n                    per_inst_centerness = centernesses[\n                        per_inst_pos_inds].sigmoid().reshape(-1, )\n                    select = (per_inst_scores * per_inst_centerness).topk(\n                        k=num_inst_per_gt, dim=0)[1]\n                    mask_preds_per_inst = mask_preds_per_inst[select]\n                    gt_inds_per_inst = gt_inds_per_inst[select]\n                keep_mask_preds.append(mask_preds_per_inst)\n                
keep_pos_assigned_gt_inds.append(gt_inds_per_inst)\n            mask_preds = torch.cat(keep_mask_preds)\n            pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds)\n            num_pos = pos_assigned_gt_inds.size(0)\n\n        # Follow the original implementation\n        start = int(self.mask_out_stride // 2)\n        gt_masks = gt_masks[:, start::self.mask_out_stride,\n                            start::self.mask_out_stride]\n        gt_masks = gt_masks.gt(0.5).float()\n        pos_mask_targets = gt_masks[pos_assigned_gt_inds]\n        pos_pairwise_masks = pairwise_masks[pos_assigned_gt_inds]\n        pos_pairwise_masks = pos_pairwise_masks * pos_mask_targets.unsqueeze(1)\n\n        return (mask_preds, pos_mask_targets, pos_pairwise_masks, num_pos)\n"
  },
  {
    "path": "mmdet/models/dense_heads/cascade_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom __future__ import division\nimport copy\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import DeformConv2d\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig,\n                         OptInstanceList, OptMultiConfig)\nfrom ..task_modules.assigners import RegionAssigner\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import (images_to_levels, multi_apply, select_single_mlvl,\n                     unpack_gt_instances)\nfrom .base_dense_head import BaseDenseHead\nfrom .rpn_head import RPNHead\n\n\nclass AdaptiveConv(BaseModule):\n    \"\"\"AdaptiveConv used to adapt the sampling location with the anchors.\n\n    Args:\n        in_channels (int): Number of channels in the input image.\n        out_channels (int): Number of channels produced by the convolution.\n        kernel_size (int or tuple[int]): Size of the conv kernel.\n            Defaults to 3.\n        stride (int or tuple[int]): Stride of the convolution. Defaults to 1.\n        padding (int or tuple[int]): Zero-padding added to both sides of\n            the input. Defaults to 1.\n        dilation (int or tuple[int]): Spacing between kernel elements.\n            Defaults to 3.\n        groups (int): Number of blocked connections from input channels to\n            output channels. Defaults to 1.\n        bias (bool): If set True, adds a learnable bias to the output.\n            Defaults to False.\n        adapt_type (str): Type of adaptive conv, can be either ``offset``\n            (arbitrary anchors) or 'dilation' (uniform anchor).\n            Defaults to 'dilation'.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \\\n            list[dict]): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: Union[int, Tuple[int]] = 3,\n        stride: Union[int, Tuple[int]] = 1,\n        padding: Union[int, Tuple[int]] = 1,\n        dilation: Union[int, Tuple[int]] = 3,\n        groups: int = 1,\n        bias: bool = False,\n        adapt_type: str = 'dilation',\n        init_cfg: MultiConfig = dict(\n            type='Normal', std=0.01, override=dict(name='conv'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert adapt_type in ['offset', 'dilation']\n        self.adapt_type = adapt_type\n\n        assert kernel_size == 3, 'Adaptive conv only supports kernels 3'\n        if self.adapt_type == 'offset':\n            assert stride == 1 and padding == 1 and groups == 1, \\\n                'Adaptive conv offset mode only supports padding: {1}, ' \\\n                f'stride: {1}, groups: {1}'\n            self.conv = DeformConv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=padding,\n                stride=stride,\n                groups=groups,\n                bias=bias)\n        else:\n            self.conv = nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=dilation,\n                dilation=dilation)\n\n    def forward(self, x: 
Tensor, offset: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        if self.adapt_type == 'offset':\n            N, _, H, W = x.shape\n            assert offset is not None\n            assert H * W == offset.shape[1]\n            # reshape [N, NA, 18] to (N, 18, H, W)\n            offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)\n            offset = offset.contiguous()\n            x = self.conv(x, offset)\n        else:\n            assert offset is None\n            x = self.conv(x)\n        return x\n\n\n@MODELS.register_module()\nclass StageCascadeRPNHead(RPNHead):\n    \"\"\"Stage of CascadeRPNHead.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        anchor_generator (:obj:`ConfigDict` or dict): anchor generator config.\n        adapt_cfg (:obj:`ConfigDict` or dict): adaptation config.\n        bridged_feature (bool): whether update rpn feature. Defaults to False.\n        with_cls (bool): whether use classification branch. Defaults to True.\n        init_cfg :obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 anchor_generator: ConfigType = dict(\n                     type='AnchorGenerator',\n                     scales=[8],\n                     ratios=[1.0],\n                     strides=[4, 8, 16, 32, 64]),\n                 adapt_cfg: ConfigType = dict(type='dilation', dilation=3),\n                 bridged_feature: bool = False,\n                 with_cls: bool = True,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        self.with_cls = with_cls\n        self.anchor_strides = anchor_generator['strides']\n        self.anchor_scales = anchor_generator['scales']\n        self.bridged_feature = bridged_feature\n        self.adapt_cfg = adapt_cfg\n        super().__init__(\n            in_channels=in_channels,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        # override sampling and sampler\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            # use PseudoSampler when sampling is False\n            if self.train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n        if init_cfg is None:\n            self.init_cfg = dict(\n                type='Normal', std=0.01, override=[dict(name='rpn_reg')])\n            if self.with_cls:\n                self.init_cfg['override'].append(dict(name='rpn_cls'))\n\n    def _init_layers(self) -> None:\n        \"\"\"Init layers of a CascadeRPN stage.\"\"\"\n        adapt_cfg = copy.deepcopy(self.adapt_cfg)\n        adapt_cfg['adapt_type'] = adapt_cfg.pop('type')\n        self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,\n                                     **adapt_cfg)\n        if self.with_cls:\n            self.rpn_cls = nn.Conv2d(self.feat_channels,\n                                     self.num_anchors * self.cls_out_channels,\n                                     1)\n        self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)\n        self.relu = nn.ReLU(inplace=True)\n\n    def 
forward_single(self, x: Tensor, offset: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward function of single scale.\"\"\"\n        bridged_x = x\n        x = self.relu(self.rpn_conv(x, offset))\n        if self.bridged_feature:\n            bridged_x = x  # update feature\n        cls_score = self.rpn_cls(x) if self.with_cls else None\n        bbox_pred = self.rpn_reg(x)\n        return bridged_x, cls_score, bbox_pred\n\n    def forward(\n            self,\n            feats: List[Tensor],\n            offset_list: Optional[List[Tensor]] = None) -> Tuple[List[Tensor]]:\n        \"\"\"Forward function.\"\"\"\n        if offset_list is None:\n            offset_list = [None for _ in range(len(feats))]\n        return multi_apply(self.forward_single, feats, offset_list)\n\n    def _region_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor,\n                               gt_instances: InstanceData, img_meta: dict,\n                               gt_instances_ignore: InstanceData,\n                               featmap_sizes: List[Tuple[int, int]],\n                               num_level_anchors: List[int]) -> tuple:\n        \"\"\"Get anchor targets based on region for single level.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors, 4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors, ).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            featmap_sizes (list[Tuple[int, int]]): Feature map size each level.\n            num_level_anchors (list[int]): The number of anchors in each level.\n\n        Returns:\n            tuple:\n\n                - labels (Tensor): Labels of each level.\n                - label_weights (Tensor): Label weights of each level.\n                - bbox_targets (Tensor): BBox targets of each level.\n                - bbox_weights (Tensor): BBox weights of each level.\n                - pos_inds (Tensor): positive samples indexes.\n                - neg_inds (Tensor): negative samples indexes.\n                - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        pred_instances = InstanceData()\n        pred_instances.priors = flat_anchors\n        pred_instances.valid_flags = valid_flags\n\n        assign_result = self.assigner.assign(\n            pred_instances,\n            gt_instances,\n            img_meta,\n            featmap_sizes,\n            num_level_anchors,\n            self.anchor_scales[0],\n            self.anchor_strides,\n            gt_instances_ignore=gt_instances_ignore,\n            allowed_border=self.train_cfg['allowed_border'])\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_anchors = flat_anchors.shape[0]\n        bbox_targets = torch.zeros_like(flat_anchors)\n        bbox_weights = torch.zeros_like(flat_anchors)\n        labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)\n        label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds, sampling_result)\n\n    def region_targets(\n        self,\n        anchor_list: List[List[Tensor]],\n        valid_flag_list: List[List[Tensor]],\n        featmap_sizes: List[Tuple[int, int]],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None,\n        return_sampling_results: bool = False,\n    ) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors when using\n        RegionAssigner.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image.\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image.\n            featmap_sizes (list[Tuple[int, int]]): Feature map size each level.\n            
batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            tuple:\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - avg_factor (int): Average factor that is used to average\n                  the loss. When using sampling method, avg_factor is usually\n                  the sum of positive and negative priors. When using\n                  ``PseudoSampler``, ``avg_factor`` is usually equal to the\n                  number of positive priors.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors to a single tensor\n        concat_anchor_list = []\n        concat_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,\n         pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply(\n             self._region_targets_single,\n             concat_anchor_list,\n             concat_valid_flag_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             featmap_sizes=featmap_sizes,\n             num_level_anchors=num_level_anchors)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. 
multiple levels\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        res = (labels_list, label_weights_list, bbox_targets_list,\n               bbox_weights_list, avg_factor)\n        if return_sampling_results:\n            res = res + (sampling_results_list, )\n        return res\n\n    def get_targets(\n        self,\n        anchor_list: List[List[Tensor]],\n        valid_flag_list: List[List[Tensor]],\n        featmap_sizes: List[Tuple[int, int]],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None,\n        return_sampling_results: bool = False,\n    ) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image.\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image.\n            featmap_sizes (list[Tuple[int, int]]): Feature map size each level.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            return_sampling_results (bool): Whether to return the sampling\n                results. Defaults to False.\n\n        Returns:\n            tuple:\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - avg_factor (int): Average factor that is used to average\n                  the loss. When using sampling method, avg_factor is usually\n                  the sum of positive and negative priors. 
When using\n                  ``PseudoSampler``, ``avg_factor`` is usually equal to the\n                  number of positive priors.\n        \"\"\"\n        if isinstance(self.assigner, RegionAssigner):\n            cls_reg_targets = self.region_targets(\n                anchor_list,\n                valid_flag_list,\n                featmap_sizes,\n                batch_gt_instances,\n                batch_img_metas,\n                batch_gt_instances_ignore=batch_gt_instances_ignore,\n                return_sampling_results=return_sampling_results)\n        else:\n            cls_reg_targets = super().get_targets(\n                anchor_list,\n                valid_flag_list,\n                batch_gt_instances,\n                batch_img_metas,\n                batch_gt_instances_ignore=batch_gt_instances_ignore,\n                return_sampling_results=return_sampling_results)\n        return cls_reg_targets\n\n    def anchor_offset(self, anchor_list: List[List[Tensor]],\n                      anchor_strides: List[int],\n                      featmap_sizes: List[Tuple[int, int]]) -> List[Tensor]:\n        \"\"\" Get offset for deformable conv based on anchor shape\n        NOTE: currently support deformable kernel_size=3 and dilation=1\n\n        Args:\n            anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of\n                multi-level anchors\n            anchor_strides (list[int]): anchor stride of each level\n\n        Returns:\n            list[tensor]: offset of DeformConv kernel with shapes of\n            [NLVL, NA, 2, 18].\n        \"\"\"\n\n        def _shape_offset(anchors, stride, ks=3, dilation=1):\n            # currently support kernel_size=3 and dilation=1\n            assert ks == 3 and dilation == 1\n            pad = (ks - 1) // 2\n            idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)\n            yy, xx = torch.meshgrid(idx, idx)  # return order matters\n            xx = xx.reshape(-1)\n            yy = yy.reshape(-1)\n            w = (anchors[:, 2] - anchors[:, 0]) / stride\n            h = (anchors[:, 3] - anchors[:, 1]) / stride\n            w = w / (ks - 1) - dilation\n            h = h / (ks - 1) - dilation\n            offset_x = w[:, None] * xx  # (NA, ks**2)\n            offset_y = h[:, None] * yy  # (NA, ks**2)\n            return offset_x, offset_y\n\n        def _ctr_offset(anchors, stride, featmap_size):\n            feat_h, feat_w = featmap_size\n            assert len(anchors) == feat_h * feat_w\n\n            x = (anchors[:, 0] + anchors[:, 2]) * 0.5\n            y = (anchors[:, 1] + anchors[:, 3]) * 0.5\n            # compute centers on feature map\n            x = x / stride\n            y = y / stride\n            # compute predefine centers\n            xx = torch.arange(0, feat_w, device=anchors.device)\n            yy = torch.arange(0, feat_h, device=anchors.device)\n            yy, xx = torch.meshgrid(yy, xx)\n            xx = xx.reshape(-1).type_as(x)\n            yy = yy.reshape(-1).type_as(y)\n\n            offset_x = x - xx  # (NA, )\n            offset_y = y - yy  # (NA, )\n            return offset_x, offset_y\n\n        num_imgs = len(anchor_list)\n        num_lvls = len(anchor_list[0])\n        dtype = anchor_list[0][0].dtype\n        device = anchor_list[0][0].device\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        offset_list = []\n        for i in range(num_imgs):\n            mlvl_offset = []\n            for lvl in range(num_lvls):\n                
c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],\n                                                     anchor_strides[lvl],\n                                                     featmap_sizes[lvl])\n                s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],\n                                                       anchor_strides[lvl])\n\n                # offset = ctr_offset + shape_offset\n                offset_x = s_offset_x + c_offset_x[:, None]\n                offset_y = s_offset_y + c_offset_y[:, None]\n\n                # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9)\n                offset = torch.stack([offset_y, offset_x], dim=-1)\n                offset = offset.reshape(offset.size(0), -1)  # [NA, 2*ks**2]\n                mlvl_offset.append(offset)\n            offset_list.append(torch.cat(mlvl_offset))  # [totalNA, 2*ks**2]\n        offset_list = images_to_levels(offset_list, num_level_anchors)\n        return offset_list\n\n    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            anchors: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            bbox_weights: Tensor, avg_factor: int) -> tuple:\n        \"\"\"Loss function on single scale.\"\"\"\n        # classification loss\n        if self.with_cls:\n            labels = labels.reshape(-1)\n            label_weights = label_weights.reshape(-1)\n            cls_score = cls_score.permute(0, 2, 3,\n                                          1).reshape(-1, self.cls_out_channels)\n            loss_cls = self.loss_cls(\n                cls_score, labels, label_weights, avg_factor=avg_factor)\n        # regression loss\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        bbox_weights = bbox_weights.reshape(-1, 4)\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            anchors = anchors.reshape(-1, 4)\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n        loss_reg = self.loss_bbox(\n            bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor)\n        if self.with_cls:\n            return loss_cls, loss_reg\n        return None, loss_reg\n\n    def loss_by_feat(\n        self,\n        anchor_list: List[List[Tensor]],\n        valid_flag_list: List[List[Tensor]],\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image.\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            featmap_sizes,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            return_sampling_results=True)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor, sampling_results_list) = cls_reg_targets\n        if not sampling_results_list[0].avg_factor_with_neg:\n            # 200 is hard-coded average factor,\n            # which follows guided anchoring.\n            avg_factor = sum([label.numel() for label in labels_list]) / 200.0\n\n        # change per image, per level anchor_list to per_level, per_image\n        mlvl_anchor_list = list(zip(*anchor_list))\n        # concat mlvl_anchor_list\n        mlvl_anchor_list = [\n            torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list\n        ]\n\n        losses = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            mlvl_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            avg_factor=avg_factor)\n        if self.with_cls:\n            return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])\n        return dict(loss_rpn_reg=losses[1])\n\n    def predict_by_feat(self,\n                        anchor_list: List[List[Tensor]],\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        batch_img_metas: List[dict],\n                        cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False) -> InstanceList:\n        \"\"\"Get proposal predict. 
Overriding to enable input ``anchor_list``\n        from outside.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image.\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            batch_img_metas (list[dict], Optional): Image meta info.\n            cfg (:obj:`ConfigDict`, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score_list = select_single_mlvl(cls_scores, img_id)\n            bbox_pred_list = select_single_mlvl(bbox_preds, img_id)\n            proposals = self._predict_by_feat_single(\n                cls_scores=cls_score_list,\n                bbox_preds=bbox_pred_list,\n                mlvl_anchors=anchor_list[img_id],\n                img_meta=batch_img_metas[img_id],\n                cfg=cfg,\n                rescale=rescale)\n            result_list.append(proposals)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_scores: List[Tensor],\n                                bbox_preds: List[Tensor],\n                                mlvl_anchors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False) -> InstanceData:\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has\n                shape (num_anchors * 4, H, W).\n            mlvl_anchors (list[Tensor]): Box reference from all scale\n                levels of a single image, each item has shape\n                (num_total_anchors, 4).\n            img_shape (tuple[int]): Shape of the input image,\n                (height, width, 3).\n            scale_factor (ndarray): Scale factor of the image arange as\n                (w_scale, h_scale, w_scale, h_scale).\n            cfg (:obj:`ConfigDict`): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to 
False.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        # bboxes from different level should be independent during NMS,\n        # level_ids are used as labels for batched NMS to separate them\n        level_ids = []\n        mlvl_scores = []\n        mlvl_bbox_preds = []\n        mlvl_valid_anchors = []\n        nms_pre = cfg.get('nms_pre', -1)\n        for idx in range(len(cls_scores)):\n            rpn_cls_score = cls_scores[idx]\n            rpn_bbox_pred = bbox_preds[idx]\n            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]\n            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)\n            if self.use_sigmoid_cls:\n                rpn_cls_score = rpn_cls_score.reshape(-1)\n                scores = rpn_cls_score.sigmoid()\n            else:\n                rpn_cls_score = rpn_cls_score.reshape(-1, 2)\n                # We set FG labels to [0, num_class-1] and BG label to\n                # num_class in RPN head since mmdet v2.5, which is unified to\n                # be consistent with other head since mmdet v2.0. In mmdet v2.0\n                # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.\n                scores = rpn_cls_score.softmax(dim=1)[:, 0]\n            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            anchors = mlvl_anchors[idx]\n\n            if 0 < nms_pre < scores.shape[0]:\n                # sort is faster than topk\n                # _, topk_inds = scores.topk(cfg.nms_pre)\n                ranked_scores, rank_inds = scores.sort(descending=True)\n                topk_inds = rank_inds[:nms_pre]\n                scores = ranked_scores[:nms_pre]\n                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]\n                anchors = anchors[topk_inds, :]\n            mlvl_scores.append(scores)\n            mlvl_bbox_preds.append(rpn_bbox_pred)\n            mlvl_valid_anchors.append(anchors)\n            level_ids.append(\n                scores.new_full((scores.size(0), ), idx, dtype=torch.long))\n\n        anchors = torch.cat(mlvl_valid_anchors)\n        rpn_bbox_pred = torch.cat(mlvl_bbox_preds)\n        bboxes = self.bbox_coder.decode(\n            anchors, rpn_bbox_pred, max_shape=img_meta['img_shape'])\n\n        proposals = InstanceData()\n        proposals.bboxes = bboxes\n        proposals.scores = torch.cat(mlvl_scores)\n        proposals.level_ids = torch.cat(level_ids)\n\n        return self._bbox_post_process(\n            results=proposals, cfg=cfg, rescale=rescale, img_meta=img_meta)\n\n    def refine_bboxes(self, anchor_list: List[List[Tensor]],\n                      bbox_preds: List[Tensor],\n                      img_metas: List[dict]) -> List[List[Tensor]]:\n        \"\"\"Refine bboxes through stages.\"\"\"\n        num_levels = len(bbox_preds)\n        new_anchor_list = []\n        for img_id in range(len(img_metas)):\n            mlvl_anchors = []\n            for i in range(num_levels):\n                
bbox_pred = bbox_preds[i][img_id].detach()\n                bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n                img_shape = img_metas[img_id]['img_shape']\n                bboxes = self.bbox_coder.decode(anchor_list[img_id][i],\n                                                bbox_pred, img_shape)\n                mlvl_anchors.append(bboxes)\n            new_anchor_list.append(mlvl_anchors)\n        return new_anchor_list\n\n    def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, _, batch_img_metas = outputs\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        if self.adapt_cfg['type'] == 'offset':\n            offset_list = self.anchor_offset(anchor_list, self.anchor_strides,\n                                             featmap_sizes)\n        else:\n            offset_list = None\n\n        x, cls_score, bbox_pred = self(x, offset_list)\n        rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred,\n                           batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*rpn_loss_inputs)\n\n        return losses\n\n    def loss_and_predict(\n        self,\n        x: Tuple[Tensor],\n        batch_data_samples: SampleList,\n        proposal_cfg: Optional[ConfigDict] = None,\n    ) -> Tuple[dict, InstanceList]:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples.\n\n        Args:\n            x (tuple[Tensor]): Features from FPN.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n            proposal_cfg (:obj`ConfigDict`, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n\n        Returns:\n            tuple: the return value is a tuple contains:\n\n                - losses: (dict[str, Tensor]): A dictionary of loss components.\n                - predictions (list[:obj:`InstanceData`]): Detection\n                  results of each image after the post process.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, _, batch_img_metas = outputs\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        if self.adapt_cfg['type'] == 'offset':\n            offset_list = self.anchor_offset(anchor_list, self.anchor_strides,\n                                             featmap_sizes)\n        else:\n           
 offset_list = None\n\n        x, cls_score, bbox_pred = self(x, offset_list)\n        rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred,\n                           batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*rpn_loss_inputs)\n\n        predictions = self.predict_by_feat(\n            anchor_list,\n            cls_score,\n            bbox_pred,\n            batch_img_metas=batch_img_metas,\n            cfg=proposal_cfg)\n        return losses, predictions\n\n    def predict(self,\n                x: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, _ = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        if self.adapt_cfg['type'] == 'offset':\n            offset_list = self.anchor_offset(anchor_list, self.anchor_strides,\n                                             featmap_sizes)\n        else:\n            offset_list = None\n\n        x, cls_score, bbox_pred = self(x, offset_list)\n        predictions = self.stages[-1].predict_by_feat(\n            anchor_list,\n            cls_score,\n            bbox_pred,\n            batch_img_metas=batch_img_metas,\n            rescale=rescale)\n        return predictions\n\n\n@MODELS.register_module()\nclass CascadeRPNHead(BaseDenseHead):\n    \"\"\"The CascadeRPNHead will predict more accurate region proposals, which is\n    required for two-stage detectors (such as Fast/Faster R-CNN). 
CascadeRPN\n    consists of a sequence of RPNStage to progressively improve the accuracy of\n    the detected proposals.\n\n    More details can be found in ``https://arxiv.org/abs/1909.06720``.\n\n    Args:\n        num_classes (int): Number of categories. Only ``num_classes == 1``\n            is supported.\n        num_stages (int): number of CascadeRPN stages.\n        stages (list[:obj:`ConfigDict` or dict]): list of configs to build\n            the stages.\n        train_cfg (list[:obj:`ConfigDict` or dict]): list of configs at\n            training time for each stage.\n        test_cfg (:obj:`ConfigDict` or dict): config at testing time.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \\\n            list[dict]): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 num_stages: int,\n                 stages: List[ConfigType],\n                 train_cfg: List[ConfigType],\n                 test_cfg: ConfigType,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert num_classes == 1, 'Only support num_classes == 1'\n        assert num_stages == len(stages)\n        self.num_stages = num_stages\n        # Be careful! Pretrained weights cannot be loaded when using\n        # nn.ModuleList\n        self.stages = ModuleList()\n        for i in range(len(stages)):\n            train_cfg_i = train_cfg[i] if train_cfg is not None else None\n            stages[i].update(train_cfg=train_cfg_i)\n            stages[i].update(test_cfg=test_cfg)\n            self.stages.append(MODELS.build(stages[i]))\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def loss_by_feat(self):\n        \"\"\"loss_by_feat() is implemented in StageCascadeRPNHead.\"\"\"\n        pass\n\n    def predict_by_feat(self):\n        \"\"\"predict_by_feat() is implemented in StageCascadeRPNHead.\"\"\"\n        pass\n\n    def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, _, batch_img_metas = outputs\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, valid_flag_list = self.stages[0].get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        losses = dict()\n\n        for i in range(self.num_stages):\n            stage = self.stages[i]\n\n            if stage.adapt_cfg['type'] == 'offset':\n                offset_list = stage.anchor_offset(anchor_list,\n                                                  stage.anchor_strides,\n                                                  featmap_sizes)\n            else:\n                offset_list = None\n            x, cls_score, bbox_pred = stage(x, offset_list)\n            rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,\n                               bbox_pred, batch_gt_instances, batch_img_metas)\n            stage_loss = stage.loss_by_feat(*rpn_loss_inputs)\n            for name, value in stage_loss.items():\n                losses['s{}.{}'.format(i, name)] = value\n\n            # refine boxes\n            if i < self.num_stages - 1:\n                anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,\n                                                  batch_img_metas)\n\n        return losses\n\n    def loss_and_predict(\n        self,\n        x: Tuple[Tensor],\n        batch_data_samples: SampleList,\n        proposal_cfg: Optional[ConfigDict] = None,\n    ) -> Tuple[dict, InstanceList]:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples.\n\n        Args:\n            x (tuple[Tensor]): Features from FPN.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n            proposal_cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n\n        Returns:\n            tuple: the return value is a tuple contains:\n\n                - losses: (dict[str, Tensor]): A dictionary of loss components.\n                - predictions (list[:obj:`InstanceData`]): Detection\n                  results of each image after the post process.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, _, batch_img_metas = outputs\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, valid_flag_list = self.stages[0].get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        losses = dict()\n\n        for i in range(self.num_stages):\n            stage = self.stages[i]\n\n            if stage.adapt_cfg['type'] == 'offset':\n                offset_list = stage.anchor_offset(anchor_list,\n                                                  stage.anchor_strides,\n                                                  featmap_sizes)\n            else:\n                offset_list = None\n            x, cls_score, bbox_pred = stage(x, offset_list)\n            rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,\n     
                          bbox_pred, batch_gt_instances, batch_img_metas)\n            stage_loss = stage.loss_by_feat(*rpn_loss_inputs)\n            for name, value in stage_loss.items():\n                losses['s{}.{}'.format(i, name)] = value\n\n            # refine boxes\n            if i < self.num_stages - 1:\n                anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,\n                                                  batch_img_metas)\n\n        predictions = self.stages[-1].predict_by_feat(\n            anchor_list,\n            cls_score,\n            bbox_pred,\n            batch_img_metas=batch_img_metas,\n            cfg=proposal_cfg)\n        return losses, predictions\n\n    def predict(self,\n                x: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, _ = self.stages[0].get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        for i in range(self.num_stages):\n            stage = self.stages[i]\n            if stage.adapt_cfg['type'] == 'offset':\n                offset_list = stage.anchor_offset(anchor_list,\n                                                  stage.anchor_strides,\n                                                  featmap_sizes)\n            else:\n                offset_list = None\n            x, cls_score, bbox_pred = stage(x, offset_list)\n            if i < self.num_stages - 1:\n                anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,\n                                                  batch_img_metas)\n\n        predictions = self.stages[-1].predict_by_feat(\n            anchor_list,\n            cls_score,\n            bbox_pred,\n            batch_img_metas=batch_img_metas,\n            rescale=rescale)\n        return predictions\n"
  },
  {
    "path": "mmdet/models/dense_heads/centernet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import batched_nms\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import bias_init_with_prob, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, OptMultiConfig)\nfrom ..utils import (gaussian_radius, gen_gaussian_target, get_local_maximum,\n                     get_topk_from_heatmap, multi_apply,\n                     transpose_and_gather_feat)\nfrom .base_dense_head import BaseDenseHead\n\n\n@MODELS.register_module()\nclass CenterNetHead(BaseDenseHead):\n    \"\"\"Objects as Points Head. CenterHead use center_point to indicate object's\n    position. Paper link <https://arxiv.org/abs/1904.07850>\n\n    Args:\n        in_channels (int): Number of channel in the input feature map.\n        feat_channels (int): Number of channel in the intermediate feature map.\n        num_classes (int): Number of categories excluding the background\n            category.\n        loss_center_heatmap (:obj:`ConfigDict` or dict): Config of center\n            heatmap loss. Defaults to\n            dict(type='GaussianFocalLoss', loss_weight=1.0)\n        loss_wh (:obj:`ConfigDict` or dict): Config of wh loss. Defaults to\n             dict(type='L1Loss', loss_weight=0.1).\n        loss_offset (:obj:`ConfigDict` or dict): Config of offset loss.\n            Defaults to dict(type='L1Loss', loss_weight=1.0).\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config.\n            Useless in CenterNet, but we keep this variable for\n            SingleStageDetector.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config\n            of CenterNet.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`], optional): Initialization\n            config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 feat_channels: int,\n                 num_classes: int,\n                 loss_center_heatmap: ConfigType = dict(\n                     type='GaussianFocalLoss', loss_weight=1.0),\n                 loss_wh: ConfigType = dict(type='L1Loss', loss_weight=0.1),\n                 loss_offset: ConfigType = dict(\n                     type='L1Loss', loss_weight=1.0),\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.heatmap_head = self._build_head(in_channels, feat_channels,\n                                             num_classes)\n        self.wh_head = self._build_head(in_channels, feat_channels, 2)\n        self.offset_head = self._build_head(in_channels, feat_channels, 2)\n\n        self.loss_center_heatmap = MODELS.build(loss_center_heatmap)\n        self.loss_wh = MODELS.build(loss_wh)\n        self.loss_offset = MODELS.build(loss_offset)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.fp16_enabled = False\n\n    def _build_head(self, in_channels: int, feat_channels: int,\n                    out_channels: int) -> nn.Sequential:\n        \"\"\"Build head for each branch.\"\"\"\n        layer = nn.Sequential(\n 
           nn.Conv2d(in_channels, feat_channels, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(feat_channels, out_channels, kernel_size=1))\n        return layer\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        bias_init = bias_init_with_prob(0.1)\n        self.heatmap_head[-1].bias.data.fill_(bias_init)\n        for head in [self.wh_head, self.offset_head]:\n            for m in head.modules():\n                if isinstance(m, nn.Conv2d):\n                    normal_init(m, std=0.001)\n\n    def forward(self, x: Tuple[Tensor, ...]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features. Notice CenterNet head does not use FPN.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            center_heatmap_preds (list[Tensor]): center predict heatmaps for\n                all levels, the channels number is num_classes.\n            wh_preds (list[Tensor]): wh predicts for all levels, the channels\n                number is 2.\n            offset_preds (list[Tensor]): offset predicts for all levels, the\n               channels number is 2.\n        \"\"\"\n        return multi_apply(self.forward_single, x)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]:\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): Feature of a single level.\n\n        Returns:\n            center_heatmap_pred (Tensor): center predict heatmaps, the\n               channels number is num_classes.\n            wh_pred (Tensor): wh predicts, the channels number is 2.\n            offset_pred (Tensor): offset predicts, the channels number is 2.\n        \"\"\"\n        center_heatmap_pred = self.heatmap_head(x).sigmoid()\n        wh_pred = self.wh_head(x)\n        offset_pred = self.offset_head(x)\n        return center_heatmap_pred, wh_pred, offset_pred\n\n    def loss_by_feat(\n            self,\n            center_heatmap_preds: List[Tensor],\n            wh_preds: List[Tensor],\n            offset_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            center_heatmap_preds (list[Tensor]): center predict heatmaps for\n               all levels with shape (B, num_classes, H, W).\n            wh_preds (list[Tensor]): wh predicts for all levels with\n               shape (B, 2, H, W).\n            offset_preds (list[Tensor]): offset predicts for all levels\n               with shape (B, 2, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: which has components below:\n                - loss_center_heatmap (Tensor): loss of center heatmap.\n                - loss_wh (Tensor): loss of hw heatmap\n                - loss_offset (Tensor): loss of offset heatmap.\n        \"\"\"\n        assert len(center_heatmap_preds) == len(wh_preds) == len(\n            offset_preds) == 1\n        center_heatmap_pred = center_heatmap_preds[0]\n        wh_pred = wh_preds[0]\n        offset_pred = offset_preds[0]\n\n        gt_bboxes = [\n            gt_instances.bboxes for gt_instances in batch_gt_instances\n        ]\n        gt_labels = [\n            gt_instances.labels for gt_instances in batch_gt_instances\n        ]\n        img_shape = batch_img_metas[0]['batch_input_shape']\n        target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels,\n                                                     center_heatmap_pred.shape,\n                                                     img_shape)\n\n        center_heatmap_target = target_result['center_heatmap_target']\n        wh_target = target_result['wh_target']\n        offset_target = target_result['offset_target']\n        wh_offset_target_weight = target_result['wh_offset_target_weight']\n\n        # Since the channel of wh_target and offset_target is 2, the avg_factor\n        # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset.\n        loss_center_heatmap = self.loss_center_heatmap(\n            center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor)\n        loss_wh = self.loss_wh(\n            wh_pred,\n            wh_target,\n            wh_offset_target_weight,\n            avg_factor=avg_factor * 2)\n        loss_offset = self.loss_offset(\n            offset_pred,\n            offset_target,\n            wh_offset_target_weight,\n            avg_factor=avg_factor * 2)\n        return dict(\n            loss_center_heatmap=loss_center_heatmap,\n            loss_wh=loss_wh,\n            loss_offset=loss_offset)\n\n    def get_targets(self, gt_bboxes: List[Tensor], gt_labels: List[Tensor],\n                    feat_shape: tuple, img_shape: tuple) -> Tuple[dict, int]:\n        \"\"\"Compute regression and classification targets in multiple images.\n\n        Args:\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box.\n            feat_shape (tuple): feature map shape with value [B, _, H, W]\n            img_shape (tuple): image shape.\n\n        Returns:\n            tuple[dict, float]: The float value is mean avg_factor, the dict\n            has components below:\n               - center_heatmap_target (Tensor): targets of center heatmap, \\\n                   shape (B, num_classes, H, W).\n               - wh_target (Tensor): targets of wh predict, shape \\\n                   (B, 2, H, W).\n               - offset_target (Tensor): targets of offset predict, shape \\\n                   (B, 2, H, W).\n               - wh_offset_target_weight (Tensor): weights of wh and offset \\\n                   predict, shape (B, 2, H, W).\n        \"\"\"\n        img_h, img_w = img_shape[:2]\n        bs, _, feat_h, feat_w = feat_shape\n\n        width_ratio = float(feat_w / img_w)\n        height_ratio = 
float(feat_h / img_h)\n\n        center_heatmap_target = gt_bboxes[-1].new_zeros(\n            [bs, self.num_classes, feat_h, feat_w])\n        wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])\n        offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])\n        wh_offset_target_weight = gt_bboxes[-1].new_zeros(\n            [bs, 2, feat_h, feat_w])\n\n        for batch_id in range(bs):\n            gt_bbox = gt_bboxes[batch_id]\n            gt_label = gt_labels[batch_id]\n            center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2\n            center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2\n            gt_centers = torch.cat((center_x, center_y), dim=1)\n\n            for j, ct in enumerate(gt_centers):\n                ctx_int, cty_int = ct.int()\n                ctx, cty = ct\n                scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio\n                scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio\n                radius = gaussian_radius([scale_box_h, scale_box_w],\n                                         min_overlap=0.3)\n                radius = max(0, int(radius))\n                ind = gt_label[j]\n                gen_gaussian_target(center_heatmap_target[batch_id, ind],\n                                    [ctx_int, cty_int], radius)\n\n                wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w\n                wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h\n\n                offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int\n                offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int\n\n                wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1\n\n        avg_factor = max(1, center_heatmap_target.eq(1).sum())\n        target_result = dict(\n            center_heatmap_target=center_heatmap_target,\n            wh_target=wh_target,\n            offset_target=offset_target,\n            wh_offset_target_weight=wh_offset_target_weight)\n        return target_result, avg_factor\n\n    def predict_by_feat(self,\n                        center_heatmap_preds: List[Tensor],\n                        wh_preds: List[Tensor],\n                        offset_preds: List[Tensor],\n                        batch_img_metas: Optional[List[dict]] = None,\n                        rescale: bool = True,\n                        with_nms: bool = False) -> InstanceList:\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            center_heatmap_preds (list[Tensor]): Center predict heatmaps for\n                all levels with shape (B, num_classes, H, W).\n            wh_preds (list[Tensor]): WH predicts for all levels with\n                shape (B, 2, H, W).\n            offset_preds (list[Tensor]): Offset predicts for all levels\n                with shape (B, 2, H, W).\n            batch_img_metas (list[dict], optional): Batch image meta info.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to True.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Instance segmentation\n            results of each image after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n               
 - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arranged as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(center_heatmap_preds) == len(wh_preds) == len(\n            offset_preds) == 1\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            result_list.append(\n                self._predict_by_feat_single(\n                    center_heatmap_preds[0][img_id:img_id + 1, ...],\n                    wh_preds[0][img_id:img_id + 1, ...],\n                    offset_preds[0][img_id:img_id + 1, ...],\n                    batch_img_metas[img_id],\n                    rescale=rescale,\n                    with_nms=with_nms))\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                center_heatmap_pred: Tensor,\n                                wh_pred: Tensor,\n                                offset_pred: Tensor,\n                                img_meta: dict,\n                                rescale: bool = True,\n                                with_nms: bool = False) -> InstanceData:\n        \"\"\"Transform outputs of a single image into bbox results.\n\n        Args:\n            center_heatmap_pred (Tensor): Center heatmap for current level with\n                shape (1, num_classes, H, W).\n            wh_pred (Tensor): WH heatmap for current level with shape\n                (1, 2, H, W).\n            offset_pred (Tensor): Offset for current level with shape\n                (1, 2, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to True.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to False.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arranged as (x1, y1, x2, y2).\n        \"\"\"\n        batch_det_bboxes, batch_labels = self._decode_heatmap(\n            center_heatmap_pred,\n            wh_pred,\n            offset_pred,\n            img_meta['batch_input_shape'],\n            k=self.test_cfg.topk,\n            kernel=self.test_cfg.local_maximum_kernel)\n\n        det_bboxes = batch_det_bboxes.view([-1, 5])\n        det_labels = batch_labels.view(-1)\n\n        batch_border = det_bboxes.new_tensor(img_meta['border'])[...,\n                                                                 [2, 0, 2, 0]]\n        det_bboxes[..., :4] -= batch_border\n\n        if rescale and 'scale_factor' in img_meta:\n            det_bboxes[..., :4] /= det_bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n\n        if with_nms:\n            det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,\n                                                      self.test_cfg)\n        results = InstanceData()\n        results.bboxes = det_bboxes[..., 
:4]\n        results.scores = det_bboxes[..., 4]\n        results.labels = det_labels\n        return results\n\n    def _decode_heatmap(self,\n                        center_heatmap_pred: Tensor,\n                        wh_pred: Tensor,\n                        offset_pred: Tensor,\n                        img_shape: tuple,\n                        k: int = 100,\n                        kernel: int = 3) -> Tuple[Tensor, Tensor]:\n        \"\"\"Transform outputs into detections raw bbox prediction.\n\n        Args:\n            center_heatmap_pred (Tensor): center predict heatmap,\n               shape (B, num_classes, H, W).\n            wh_pred (Tensor): wh predict, shape (B, 2, H, W).\n            offset_pred (Tensor): offset predict, shape (B, 2, H, W).\n            img_shape (tuple): image shape in hw format.\n            k (int): Get top k center keypoints from heatmap. Defaults to 100.\n            kernel (int): Max pooling kernel for extract local maximum pixels.\n               Defaults to 3.\n\n        Returns:\n            tuple[Tensor]: Decoded output of CenterNetHead, containing\n               the following Tensors:\n\n              - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5)\n              - batch_topk_labels (Tensor): Categories of each box with \\\n                  shape (B, k)\n        \"\"\"\n        height, width = center_heatmap_pred.shape[2:]\n        inp_h, inp_w = img_shape\n\n        center_heatmap_pred = get_local_maximum(\n            center_heatmap_pred, kernel=kernel)\n\n        *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap(\n            center_heatmap_pred, k=k)\n        batch_scores, batch_index, batch_topk_labels = batch_dets\n\n        wh = transpose_and_gather_feat(wh_pred, batch_index)\n        offset = transpose_and_gather_feat(offset_pred, batch_index)\n        topk_xs = topk_xs + offset[..., 0]\n        topk_ys = topk_ys + offset[..., 1]\n        tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width)\n        tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height)\n        br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width)\n        br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height)\n\n        batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2)\n        batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]),\n                                 dim=-1)\n        return batch_bboxes, batch_topk_labels\n\n    def _bboxes_nms(self, bboxes: Tensor, labels: Tensor,\n                    cfg: ConfigDict) -> Tuple[Tensor, Tensor]:\n        \"\"\"bboxes nms.\"\"\"\n        if labels.numel() > 0:\n            max_num = cfg.max_per_img\n            bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,\n                                                             -1].contiguous(),\n                                       labels, cfg.nms)\n            if max_num > 0:\n                bboxes = bboxes[:max_num]\n                labels = labels[keep][:max_num]\n\n        return bboxes, labels\n"
  },
  {
    "path": "mmdet/models/dense_heads/centernet_update_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Scale\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox2distance\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..utils import multi_apply\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1000000000\nRangeType = Sequence[Tuple[int, int]]\n\n\ndef _transpose(tensor_list: List[Tensor],\n               num_point_list: list) -> List[Tensor]:\n    \"\"\"This function is used to transpose image first tensors to level first\n    ones.\"\"\"\n    for img_idx in range(len(tensor_list)):\n        tensor_list[img_idx] = torch.split(\n            tensor_list[img_idx], num_point_list, dim=0)\n\n    tensors_level_first = []\n    for targets_per_level in zip(*tensor_list):\n        tensors_level_first.append(torch.cat(targets_per_level, dim=0))\n    return tensors_level_first\n\n\n@MODELS.register_module()\nclass CenterNetUpdateHead(AnchorFreeHead):\n    \"\"\"CenterNetUpdateHead is an improved version of CenterNet in CenterNet2.\n    Paper link `<https://arxiv.org/abs/2103.07461>`_.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channel in the input feature map.\n        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple\n            level points.\n        hm_min_radius (int): Heatmap target minimum radius of cls branch.\n            Defaults to 4.\n        hm_min_overlap (float): Heatmap target minimum overlap of cls branch.\n            Defaults to 0.8.\n        more_pos_thresh (float): The filtering threshold when the cls branch\n            adds more positive samples. Defaults to 0.2.\n        more_pos_topk (int): The maximum number of additional positive samples\n            added to each gt. Defaults to 9.\n        soft_weight_on_reg (bool): Whether to use the soft target of the\n            cls branch as the soft weight of the bbox branch.\n            Defaults to False.\n        loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to\n            dict(type='GaussianFocalLoss', loss_weight=1.0)\n        loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to\n             dict(type='GIoULoss', loss_weight=2.0).\n        norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct\n            and config norm layer.  Defaults to\n            ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config.\n            Unused in CenterNet. 
Reserved for compatibility with\n            SingleStageDetector.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config\n            of CenterNet.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320),\n                                              (256, 640), (512, INF)),\n                 hm_min_radius: int = 4,\n                 hm_min_overlap: float = 0.8,\n                 more_pos_thresh: float = 0.2,\n                 more_pos_topk: int = 9,\n                 soft_weight_on_reg: bool = False,\n                 loss_cls: ConfigType = dict(\n                     type='GaussianFocalLoss',\n                     pos_weight=0.25,\n                     neg_weight=0.75,\n                     loss_weight=1.0),\n                 loss_bbox: ConfigType = dict(\n                     type='GIoULoss', loss_weight=2.0),\n                 norm_cfg: OptConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 **kwargs) -> None:\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            norm_cfg=norm_cfg,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            **kwargs)\n        self.soft_weight_on_reg = soft_weight_on_reg\n        self.hm_min_radius = hm_min_radius\n        self.more_pos_thresh = more_pos_thresh\n        self.more_pos_topk = more_pos_topk\n        self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)\n        self.sigmoid_clamp = 0.0001\n\n        # GaussianFocalLoss must be sigmoid mode\n        self.use_sigmoid_cls = True\n        self.cls_out_channels = num_classes\n\n        self.regress_ranges = regress_ranges\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n    def _init_predictor(self) -> None:\n        \"\"\"Initialize predictor layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.num_classes, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of each level outputs.\n\n            - cls_scores (list[Tensor]): Box scores for each scale level, \\\n            each is a 4D-tensor, the channel number is num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for each \\\n            scale level, each is a 4D-tensor, the channel number is 4.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales, self.strides)\n\n    def forward_single(self, x: Tensor, scale: Scale,\n                       stride: int) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps.\n\n        Returns:\n            
tuple: scores for each class, bbox predictions of\n            input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, _, _ = super().forward_single(x)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        # bbox_pred needed for gradient computation has been modified\n        # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n        # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n        bbox_pred = bbox_pred.clamp(min=0)\n        if not self.training:\n            bbox_pred *= stride\n        return cls_score, bbox_pred\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is 4.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = cls_scores[0].size(0)\n        assert len(cls_scores) == len(bbox_preds)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n\n        # 1 flatten outputs\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        assert (torch.isfinite(flatten_bbox_preds).all().item())\n\n        # 2 calc reg and cls branch targets\n        cls_targets, bbox_targets = self.get_targets(all_level_points,\n                                                     batch_gt_instances)\n\n        # 3 add more pos index for cls branch\n        featmap_sizes = flatten_points.new_tensor(featmap_sizes)\n        pos_inds, cls_labels = self.add_cls_pos_inds(flatten_points,\n                                                     flatten_bbox_preds,\n                                                     
featmap_sizes,\n                                                     batch_gt_instances)\n\n        # 4 calc cls loss\n        if pos_inds is None:\n            # num_gts=0\n            num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float)\n        else:\n            num_pos_cls = bbox_preds[0].new_tensor(\n                len(pos_inds), dtype=torch.float)\n        num_pos_cls = max(reduce_mean(num_pos_cls), 1.0)\n        flatten_cls_scores = flatten_cls_scores.sigmoid().clamp(\n            min=self.sigmoid_clamp, max=1 - self.sigmoid_clamp)\n        cls_loss = self.loss_cls(\n            flatten_cls_scores,\n            cls_targets,\n            pos_inds=pos_inds,\n            pos_labels=cls_labels,\n            avg_factor=num_pos_cls)\n\n        # 5 calc reg loss\n        pos_bbox_inds = torch.nonzero(\n            bbox_targets.max(dim=1)[0] >= 0).squeeze(1)\n        pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds]\n        pos_bbox_targets = bbox_targets[pos_bbox_inds]\n\n        bbox_weight_map = cls_targets.max(dim=1)[0]\n        bbox_weight_map = bbox_weight_map[pos_bbox_inds]\n        bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \\\n            else torch.ones_like(bbox_weight_map)\n        num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0)\n\n        if len(pos_bbox_inds) > 0:\n            pos_points = flatten_points[pos_bbox_inds]\n            pos_decoded_bbox_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_preds)\n            pos_decoded_target_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_targets)\n            bbox_loss = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds,\n                weight=bbox_weight_map,\n                avg_factor=num_pos_bbox)\n        else:\n            bbox_loss = flatten_bbox_preds.sum() * 0\n\n        return dict(loss_cls=cls_loss, loss_bbox=bbox_loss)\n\n    def get_targets(\n        self,\n        points: List[Tensor],\n        batch_gt_instances: InstanceList,\n    ) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute classification and bbox targets for points in multiple\n        images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  
It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple: Targets of each level.\n\n            - concat_lvl_labels (Tensor): Labels of all level and batch.\n            - concat_lvl_bbox_targets (Tensor): BBox targets of all \\\n            level and batch.\n        \"\"\"\n        assert len(points) == len(self.regress_ranges)\n\n        num_levels = len(points)\n        # the number of points per img, per lvl\n        num_points = [center.size(0) for center in points]\n\n        # expand regress ranges to align with points\n        expanded_regress_ranges = [\n            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n                points[i]) for i in range(num_levels)\n        ]\n        # concat all levels points and regress ranges\n        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n        concat_points = torch.cat(points, dim=0)\n        concat_strides = torch.cat([\n            concat_points.new_ones(num_points[i]) * self.strides[i]\n            for i in range(num_levels)\n        ])\n\n        # get labels and bbox_targets of each image\n        cls_targets_list, bbox_targets_list = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            points=concat_points,\n            regress_ranges=concat_regress_ranges,\n            strides=concat_strides)\n\n        bbox_targets_list = _transpose(bbox_targets_list, num_points)\n        cls_targets_list = _transpose(cls_targets_list, num_points)\n        concat_lvl_bbox_targets = torch.cat(bbox_targets_list, 0)\n        concat_lvl_cls_targets = torch.cat(cls_targets_list, dim=0)\n        return concat_lvl_cls_targets, concat_lvl_bbox_targets\n\n    def _get_targets_single(self, gt_instances: InstanceData, points: Tensor,\n                            regress_ranges: Tensor,\n                            strides: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute classification and bbox targets for a single image.\"\"\"\n        num_points = points.size(0)\n        num_gts = len(gt_instances)\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n\n        if num_gts == 0:\n            return gt_labels.new_full((num_points,\n                                       self.num_classes),\n                                      self.num_classes), \\\n                   gt_bboxes.new_full((num_points, 4), -1)\n\n        # Calculate the regression tblr target corresponding to all points\n        points = points[:, None].expand(num_points, num_gts, 2)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        strides = strides[:, None, None].expand(num_points, num_gts, 2)\n\n        bbox_target = bbox2distance(points, gt_bboxes)  # M x N x 4\n\n        # condition1: inside a gt bbox\n        inside_gt_bbox_mask = bbox_target.min(dim=2)[0] > 0  # M x N\n\n        # condition2: Calculate the nearest points from\n        # the upper, lower, left and right ranges from\n        # the center of the gt bbox\n        centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2)\n        centers_discret = ((centers / strides).int() * strides).float() + \\\n            strides / 2\n\n        centers_discret_dist = points - centers_discret\n        dist_x = centers_discret_dist[..., 0].abs()\n        dist_y = centers_discret_dist[..., 1].abs()\n        inside_gt_center3x3_mask = (dist_x <= strides[..., 0]) & \\\n                                   (dist_y <= strides[..., 
0])\n\n        # condition3： limit the regression range for each location\n        bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:]\n        crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2\n        inside_fpn_level_mask = (crit >= regress_ranges[:, [0]]) & \\\n                                (crit <= regress_ranges[:, [1]])\n        bbox_target_mask = inside_gt_bbox_mask & \\\n            inside_gt_center3x3_mask & \\\n            inside_fpn_level_mask\n\n        # Calculate the distance weight map\n        gt_center_peak_mask = ((centers_discret_dist**2).sum(dim=2) == 0)\n        weighted_dist = ((points - centers)**2).sum(dim=2)  # M x N\n        weighted_dist[gt_center_peak_mask] = 0\n\n        areas = (gt_bboxes[..., 2] - gt_bboxes[..., 0]) * (\n            gt_bboxes[..., 3] - gt_bboxes[..., 1])\n        radius = self.delta**2 * 2 * areas\n        radius = torch.clamp(radius, min=self.hm_min_radius**2)\n        weighted_dist = weighted_dist / radius\n\n        # Calculate bbox_target\n        bbox_weighted_dist = weighted_dist.clone()\n        bbox_weighted_dist[bbox_target_mask == 0] = INF * 1.0\n        min_dist, min_inds = bbox_weighted_dist.min(dim=1)\n        bbox_target = bbox_target[range(len(bbox_target)),\n                                  min_inds]  # M x N x 4 --> M x 4\n        bbox_target[min_dist == INF] = -INF\n\n        # Convert to feature map scale\n        bbox_target /= strides[:, 0, :].repeat(1, 2)\n\n        # Calculate cls_target\n        cls_target = self._create_heatmaps_from_dist(weighted_dist, gt_labels)\n\n        return cls_target, bbox_target\n\n    @torch.no_grad()\n    def add_cls_pos_inds(\n        self, flatten_points: Tensor, flatten_bbox_preds: Tensor,\n        featmap_sizes: Tensor, batch_gt_instances: InstanceList\n    ) -> Tuple[Optional[Tensor], Optional[Tensor]]:\n        \"\"\"Provide additional adaptive positive samples to the classification\n        branch.\n\n        Args:\n            flatten_points (Tensor): The point after flatten, including\n                batch image and all levels. The shape is (N, 2).\n            flatten_bbox_preds (Tensor): The bbox predicts after flatten,\n                including batch image and all levels. The shape is (N, 4).\n            featmap_sizes (Tensor): Feature map size of all layers.\n                The shape is (5, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  
It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n           tuple:\n\n           - pos_inds (Tensor): Adaptively selected positive sample index.\n           - cls_labels (Tensor): Corresponding positive class label.\n        \"\"\"\n        outputs = self._get_center3x3_region_index_targets(\n            batch_gt_instances, featmap_sizes)\n        cls_labels, fpn_level_masks, center3x3_inds, \\\n            center3x3_bbox_targets, center3x3_masks = outputs\n\n        num_gts, total_level, K = cls_labels.shape[0], len(\n            self.strides), center3x3_masks.shape[-1]\n\n        if num_gts == 0:\n            return None, None\n\n        # The out-of-bounds index is forcibly set to 0\n        # to prevent loss calculation errors\n        center3x3_inds[center3x3_masks == 0] = 0\n        reg_pred_center3x3 = flatten_bbox_preds[center3x3_inds]\n        center3x3_points = flatten_points[center3x3_inds].view(-1, 2)\n\n        center3x3_bbox_targets_expand = center3x3_bbox_targets.view(\n            -1, 4).clamp(min=0)\n\n        pos_decoded_bbox_preds = self.bbox_coder.decode(\n            center3x3_points, reg_pred_center3x3.view(-1, 4))\n        pos_decoded_target_preds = self.bbox_coder.decode(\n            center3x3_points, center3x3_bbox_targets_expand)\n        center3x3_bbox_loss = self.loss_bbox(\n            pos_decoded_bbox_preds,\n            pos_decoded_target_preds,\n            None,\n            reduction_override='none').view(num_gts, total_level,\n                                            K) / self.loss_bbox.loss_weight\n\n        # Invalid index Loss set to infinity\n        center3x3_bbox_loss[center3x3_masks == 0] = INF\n\n        # 4 is the center point of the sampled 9 points, the center point\n        # of gt bbox after discretization.\n        # The center point of gt bbox after discretization\n        # must be a positive sample, so we force its loss to be set to 0.\n        center3x3_bbox_loss.view(-1, K)[fpn_level_masks.view(-1), 4] = 0\n        center3x3_bbox_loss = center3x3_bbox_loss.view(num_gts, -1)\n\n        loss_thr = torch.kthvalue(\n            center3x3_bbox_loss, self.more_pos_topk, dim=1)[0]\n\n        loss_thr[loss_thr > self.more_pos_thresh] = self.more_pos_thresh\n        new_pos = center3x3_bbox_loss < loss_thr.view(num_gts, 1)\n        pos_inds = center3x3_inds.view(num_gts, -1)[new_pos]\n        cls_labels = cls_labels.view(num_gts,\n                                     1).expand(num_gts,\n                                               total_level * K)[new_pos]\n        return pos_inds, cls_labels\n\n    def _create_heatmaps_from_dist(self, weighted_dist: Tensor,\n                                   cls_labels: Tensor) -> Tensor:\n        \"\"\"Generate heatmaps of classification branch based on weighted\n        distance map.\"\"\"\n        heatmaps = weighted_dist.new_zeros(\n            (weighted_dist.shape[0], self.num_classes))\n        for c in range(self.num_classes):\n            inds = (cls_labels == c)  # N\n            if inds.int().sum() == 0:\n                continue\n            heatmaps[:, c] = torch.exp(-weighted_dist[:, inds].min(dim=1)[0])\n            zeros = heatmaps[:, c] < 1e-4\n            heatmaps[zeros, c] = 0\n        return heatmaps\n\n    def _get_center3x3_region_index_targets(self,\n                                            bacth_gt_instances: InstanceList,\n                                            shapes_per_level: Tensor) -> tuple:\n        \"\"\"Get the center 
(and the 3x3 region near center) locations and target\n        of each objects.\"\"\"\n        cls_labels = []\n        inside_fpn_level_masks = []\n        center3x3_inds = []\n        center3x3_masks = []\n        center3x3_bbox_targets = []\n\n        total_levels = len(self.strides)\n        batch = len(bacth_gt_instances)\n\n        shapes_per_level = shapes_per_level.long()\n        area_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1])\n\n        # Select a total of 9 positions of 3x3 in the center of the gt bbox\n        # as candidate positive samples\n        K = 9\n        dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0,\n                                          1]).view(1, 1, K)\n        dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1,\n                                          1]).view(1, 1, K)\n\n        regress_ranges = shapes_per_level.new_tensor(self.regress_ranges).view(\n            len(self.regress_ranges), 2)  # L x 2\n        strides = shapes_per_level.new_tensor(self.strides)\n\n        start_coord_pre_level = []\n        _start = 0\n        for level in range(total_levels):\n            start_coord_pre_level.append(_start)\n            _start = _start + batch * area_per_level[level]\n        start_coord_pre_level = shapes_per_level.new_tensor(\n            start_coord_pre_level).view(1, total_levels, 1)\n        area_per_level = area_per_level.view(1, total_levels, 1)\n\n        for im_i in range(batch):\n            gt_instance = bacth_gt_instances[im_i]\n            gt_bboxes = gt_instance.bboxes\n            gt_labels = gt_instance.labels\n            num_gts = gt_bboxes.shape[0]\n            if num_gts == 0:\n                continue\n\n            cls_labels.append(gt_labels)\n\n            gt_bboxes = gt_bboxes[:, None].expand(num_gts, total_levels, 4)\n            expanded_strides = strides[None, :,\n                                       None].expand(num_gts, total_levels, 2)\n            expanded_regress_ranges = regress_ranges[None].expand(\n                num_gts, total_levels, 2)\n            expanded_shapes_per_level = shapes_per_level[None].expand(\n                num_gts, total_levels, 2)\n\n            # calc reg_target\n            centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2)\n            centers_inds = (centers / expanded_strides).long()\n            centers_discret = centers_inds * expanded_strides \\\n                + expanded_strides // 2\n\n            bbox_target = bbox2distance(centers_discret,\n                                        gt_bboxes)  # M x N x 4\n\n            # calc inside_fpn_level_mask\n            bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:]\n            crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2\n            inside_fpn_level_mask = \\\n                (crit >= expanded_regress_ranges[..., 0]) & \\\n                (crit <= expanded_regress_ranges[..., 1])\n\n            inside_gt_bbox_mask = bbox_target.min(dim=2)[0] >= 0\n            inside_fpn_level_mask = inside_gt_bbox_mask & inside_fpn_level_mask\n            inside_fpn_level_masks.append(inside_fpn_level_mask)\n\n            # calc center3x3_ind and mask\n            expand_ws = expanded_shapes_per_level[..., 1:2].expand(\n                num_gts, total_levels, K)\n            expand_hs = expanded_shapes_per_level[..., 0:1].expand(\n                num_gts, total_levels, K)\n            centers_inds_x = centers_inds[..., 0:1]\n            centers_inds_y = centers_inds[..., 1:2]\n\n         
   center3x3_idx = start_coord_pre_level + \\\n                im_i * area_per_level + \\\n                (centers_inds_y + dy) * expand_ws + \\\n                (centers_inds_x + dx)\n            center3x3_mask = \\\n                ((centers_inds_y + dy) < expand_hs) & \\\n                ((centers_inds_y + dy) >= 0) & \\\n                ((centers_inds_x + dx) < expand_ws) & \\\n                ((centers_inds_x + dx) >= 0)\n\n            # recalc center3x3 region reg target\n            bbox_target = bbox_target / expanded_strides.repeat(1, 1, 2)\n            center3x3_bbox_target = bbox_target[..., None, :].expand(\n                num_gts, total_levels, K, 4).clone()\n            center3x3_bbox_target[..., 0] += dx\n            center3x3_bbox_target[..., 1] += dy\n            center3x3_bbox_target[..., 2] -= dx\n            center3x3_bbox_target[..., 3] -= dy\n            # update center3x3_mask\n            center3x3_mask = center3x3_mask & (\n                center3x3_bbox_target.min(dim=3)[0] >= 0)  # n x L x K\n\n            center3x3_inds.append(center3x3_idx)\n            center3x3_masks.append(center3x3_mask)\n            center3x3_bbox_targets.append(center3x3_bbox_target)\n\n        if len(inside_fpn_level_masks) > 0:\n            cls_labels = torch.cat(cls_labels, dim=0)\n            inside_fpn_level_masks = torch.cat(inside_fpn_level_masks, dim=0)\n            center3x3_inds = torch.cat(center3x3_inds, dim=0).long()\n            center3x3_bbox_targets = torch.cat(center3x3_bbox_targets, dim=0)\n            center3x3_masks = torch.cat(center3x3_masks, dim=0)\n        else:\n            cls_labels = shapes_per_level.new_zeros(0).long()\n            inside_fpn_level_masks = shapes_per_level.new_zeros(\n                (0, total_levels)).bool()\n            center3x3_inds = shapes_per_level.new_zeros(\n                (0, total_levels, K)).long()\n            center3x3_bbox_targets = shapes_per_level.new_zeros(\n                (0, total_levels, K, 4)).float()\n            center3x3_masks = shapes_per_level.new_zeros(\n                (0, total_levels, K)).bool()\n        return cls_labels, inside_fpn_level_masks, center3x3_inds, \\\n            center3x3_bbox_targets, center3x3_masks\n"
  },
  {
    "path": "mmdet/models/dense_heads/centripetal_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\nfrom mmengine.model import normal_init\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, OptInstanceList,\n                         OptMultiConfig)\nfrom ..utils import multi_apply\nfrom .corner_head import CornerHead\n\n\n@MODELS.register_module()\nclass CentripetalHead(CornerHead):\n    \"\"\"Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object\n    Detection.\n\n    CentripetalHead inherits from :class:`CornerHead`. It removes the\n    embedding branch and adds guiding shift and centripetal shift branches.\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2003.09119>`_ .\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        num_feat_levels (int): Levels of feature from the previous module.\n            2 for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104\n            outputs the final feature and intermediate supervision feature and\n            HourglassNet-52 only outputs the final feature. Defaults to 2.\n        corner_emb_channels (int): Channel of embedding vector. Defaults to 1.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config.\n            Useless in CornerHead, but we keep this variable for\n            SingleStageDetector.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            CornerHead.\n        loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap\n            loss. Defaults to GaussianFocalLoss.\n        loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding\n            loss. Defaults to AssociativeEmbeddingLoss.\n        loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss.\n            Defaults to SmoothL1Loss.\n        loss_guiding_shift (:obj:`ConfigDict` or dict): Config of\n            guiding shift loss. Defaults to SmoothL1Loss.\n        loss_centripetal_shift (:obj:`ConfigDict` or dict): Config of\n            centripetal shift loss. 
Defaults to SmoothL1Loss.\n        init_cfg (:obj:`ConfigDict` or dict, optional): The config to control\n            the initialization.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 centripetal_shift_channels: int = 2,\n                 guiding_shift_channels: int = 2,\n                 feat_adaption_conv_kernel: int = 3,\n                 loss_guiding_shift: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=0.05),\n                 loss_centripetal_shift: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1),\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        assert centripetal_shift_channels == 2, (\n            'CentripetalHead only supports centripetal_shift_channels == 2')\n        self.centripetal_shift_channels = centripetal_shift_channels\n        assert guiding_shift_channels == 2, (\n            'CentripetalHead only supports guiding_shift_channels == 2')\n        self.guiding_shift_channels = guiding_shift_channels\n        self.feat_adaption_conv_kernel = feat_adaption_conv_kernel\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        self.loss_guiding_shift = MODELS.build(loss_guiding_shift)\n        self.loss_centripetal_shift = MODELS.build(loss_centripetal_shift)\n\n    def _init_centripetal_layers(self) -> None:\n        \"\"\"Initialize centripetal layers.\n\n        Including feature adaption deform convs (feat_adaption), deform offset\n        prediction convs (dcn_off), guiding shift (guiding_shift) and\n        centripetal shift (centripetal_shift). 
Each branch has two parts:\n        prefix `tl_` for top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_feat_adaption = nn.ModuleList()\n        self.br_feat_adaption = nn.ModuleList()\n        self.tl_dcn_offset = nn.ModuleList()\n        self.br_dcn_offset = nn.ModuleList()\n        self.tl_guiding_shift = nn.ModuleList()\n        self.br_guiding_shift = nn.ModuleList()\n        self.tl_centripetal_shift = nn.ModuleList()\n        self.br_centripetal_shift = nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_feat_adaption.append(\n                DeformConv2d(self.in_channels, self.in_channels,\n                             self.feat_adaption_conv_kernel, 1, 1))\n            self.br_feat_adaption.append(\n                DeformConv2d(self.in_channels, self.in_channels,\n                             self.feat_adaption_conv_kernel, 1, 1))\n\n            self.tl_guiding_shift.append(\n                self._make_layers(\n                    out_channels=self.guiding_shift_channels,\n                    in_channels=self.in_channels))\n            self.br_guiding_shift.append(\n                self._make_layers(\n                    out_channels=self.guiding_shift_channels,\n                    in_channels=self.in_channels))\n\n            self.tl_dcn_offset.append(\n                ConvModule(\n                    self.guiding_shift_channels,\n                    self.feat_adaption_conv_kernel**2 *\n                    self.guiding_shift_channels,\n                    1,\n                    bias=False,\n                    act_cfg=None))\n            self.br_dcn_offset.append(\n                ConvModule(\n                    self.guiding_shift_channels,\n                    self.feat_adaption_conv_kernel**2 *\n                    self.guiding_shift_channels,\n                    1,\n                    bias=False,\n                    act_cfg=None))\n\n            self.tl_centripetal_shift.append(\n                self._make_layers(\n                    out_channels=self.centripetal_shift_channels,\n                    in_channels=self.in_channels))\n            self.br_centripetal_shift.append(\n                self._make_layers(\n                    out_channels=self.centripetal_shift_channels,\n                    in_channels=self.in_channels))\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers for CentripetalHead.\n\n        Including two parts: CornerHead layers and CentripetalHead layers\n        \"\"\"\n        super()._init_layers()  # using _init_layers in CornerHead\n        self._init_centripetal_layers()\n\n    def init_weights(self) -> None:\n        super().init_weights()\n        for i in range(self.num_feat_levels):\n            normal_init(self.tl_feat_adaption[i], std=0.01)\n            normal_init(self.br_feat_adaption[i], std=0.01)\n            normal_init(self.tl_dcn_offset[i].conv, std=0.1)\n            normal_init(self.br_dcn_offset[i].conv, std=0.1)\n            _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]\n            _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]\n            _ = [\n                x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]\n            ]\n            _ = [\n                x.conv.reset_parameters() for x in self.br_centripetal_shift[i]\n            ]\n\n    def forward_single(self, x: Tensor, lvl_ind: int) -> List[Tensor]:\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): 
Feature of a single level.\n            lvl_ind (int): Level index of current feature.\n\n        Returns:\n            tuple[Tensor]: A tuple of CentripetalHead's output for current\n            feature level. Containing the following Tensors:\n\n                - tl_heat (Tensor): Predicted top-left corner heatmap.\n                - br_heat (Tensor): Predicted bottom-right corner heatmap.\n                - tl_off (Tensor): Predicted top-left offset heatmap.\n                - br_off (Tensor): Predicted bottom-right offset heatmap.\n                - tl_guiding_shift (Tensor): Predicted top-left guiding shift\n                  heatmap.\n                - br_guiding_shift (Tensor): Predicted bottom-right guiding\n                  shift heatmap.\n                - tl_centripetal_shift (Tensor): Predicted top-left centripetal\n                  shift heatmap.\n                - br_centripetal_shift (Tensor): Predicted bottom-right\n                  centripetal shift heatmap.\n        \"\"\"\n        tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(\n        ).forward_single(\n            x, lvl_ind, return_pool=True)\n\n        tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)\n        br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)\n\n        tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())\n        br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())\n\n        tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,\n                                                          tl_dcn_offset)\n        br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,\n                                                          br_dcn_offset)\n\n        tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](\n            tl_feat_adaption)\n        br_centripetal_shift = self.br_centripetal_shift[lvl_ind](\n            br_feat_adaption)\n\n        result_list = [\n            tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,\n            br_guiding_shift, tl_centripetal_shift, br_centripetal_shift\n        ]\n        return result_list\n\n    def loss_by_feat(\n            self,\n            tl_heats: List[Tensor],\n            br_heats: List[Tensor],\n            tl_offs: List[Tensor],\n            br_offs: List[Tensor],\n            tl_guiding_shifts: List[Tensor],\n            br_guiding_shifts: List[Tensor],\n            tl_centripetal_shifts: List[Tensor],\n            br_centripetal_shifts: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each\n                level with shape (N, guiding_shift_channels, H, W).\n            br_guiding_shifts (list[Tensor]): 
Bottom-right guiding shifts for\n                each level with shape (N, guiding_shift_channels, H, W).\n            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts\n                for each level with shape (N, centripetal_shift_channels, H,\n                W).\n            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal\n                shifts for each level with shape (N,\n                centripetal_shift_channels, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Specify which bounding boxes can be ignored when computing\n                the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components. Containing the\n            following losses:\n\n                - det_loss (list[Tensor]): Corner keypoint losses of all\n                  feature levels.\n                - off_loss (list[Tensor]): Corner offset losses of all feature\n                  levels.\n                - guiding_loss (list[Tensor]): Guiding shift losses of all\n                  feature levels.\n                - centripetal_loss (list[Tensor]): Centripetal shift losses of\n                  all feature levels.\n        \"\"\"\n        gt_bboxes = [\n            gt_instances.bboxes for gt_instances in batch_gt_instances\n        ]\n        gt_labels = [\n            gt_instances.labels for gt_instances in batch_gt_instances\n        ]\n\n        targets = self.get_targets(\n            gt_bboxes,\n            gt_labels,\n            tl_heats[-1].shape,\n            batch_img_metas[0]['batch_input_shape'],\n            with_corner_emb=self.with_corner_emb,\n            with_guiding_shift=True,\n            with_centripetal_shift=True)\n        mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n        [det_losses, off_losses, guiding_losses, centripetal_losses\n         ] = multi_apply(self.loss_by_feat_single, tl_heats, br_heats, tl_offs,\n                         br_offs, tl_guiding_shifts, br_guiding_shifts,\n                         tl_centripetal_shifts, br_centripetal_shifts,\n                         mlvl_targets)\n        loss_dict = dict(\n            det_loss=det_losses,\n            off_loss=off_losses,\n            guiding_loss=guiding_losses,\n            centripetal_loss=centripetal_losses)\n        return loss_dict\n\n    def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor,\n                            tl_off: Tensor, br_off: Tensor,\n                            tl_guiding_shift: Tensor, br_guiding_shift: Tensor,\n                            tl_centripetal_shift: Tensor,\n                            br_centripetal_shift: Tensor,\n                            targets: dict) -> Tuple[Tensor, ...]:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            tl_hmp (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_hmp (Tensor): Bottom-right corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n       
         shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            tl_guiding_shift (Tensor): Top-left guiding shift for current level\n                with shape (N, guiding_shift_channels, H, W).\n            br_guiding_shift (Tensor): Bottom-right guiding shift for current\n                level with shape (N, guiding_shift_channels, H, W).\n            tl_centripetal_shift (Tensor): Top-left centripetal shift for\n                current level with shape (N, centripetal_shift_channels, H, W).\n            br_centripetal_shift (Tensor): Bottom-right centripetal shift for\n                current level with shape (N, centripetal_shift_channels, H, W).\n            targets (dict): Corner target generated by `get_targets`.\n\n        Returns:\n            tuple[torch.Tensor]: Losses of the head's different branches\n            containing the following losses:\n\n                - det_loss (Tensor): Corner keypoint loss.\n                - off_loss (Tensor): Corner offset loss.\n                - guiding_loss (Tensor): Guiding shift loss.\n                - centripetal_loss (Tensor): Centripetal shift loss.\n        \"\"\"\n        targets['corner_embedding'] = None\n\n        det_loss, _, _, off_loss = super().loss_by_feat_single(\n            tl_hmp, br_hmp, None, None, tl_off, br_off, targets)\n\n        gt_tl_guiding_shift = targets['topleft_guiding_shift']\n        gt_br_guiding_shift = targets['bottomright_guiding_shift']\n        gt_tl_centripetal_shift = targets['topleft_centripetal_shift']\n        gt_br_centripetal_shift = targets['bottomright_centripetal_shift']\n\n        gt_tl_heatmap = targets['topleft_heatmap']\n        gt_br_heatmap = targets['bottomright_heatmap']\n        # We only compute the offset loss at the real corner position.\n        # The value of real corner would be 1 in heatmap ground truth.\n        # The mask is computed in class agnostic mode and its shape is\n        # batch * 1 * width * height.\n        tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_tl_heatmap)\n        br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_br_heatmap)\n\n        # Guiding shift loss\n        tl_guiding_loss = self.loss_guiding_shift(\n            tl_guiding_shift,\n            gt_tl_guiding_shift,\n            tl_mask,\n            avg_factor=tl_mask.sum())\n        br_guiding_loss = self.loss_guiding_shift(\n            br_guiding_shift,\n            gt_br_guiding_shift,\n            br_mask,\n            avg_factor=br_mask.sum())\n        guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0\n        # Centripetal shift loss\n        tl_centripetal_loss = self.loss_centripetal_shift(\n            tl_centripetal_shift,\n            gt_tl_centripetal_shift,\n            tl_mask,\n            avg_factor=tl_mask.sum())\n        br_centripetal_loss = self.loss_centripetal_shift(\n            br_centripetal_shift,\n            gt_br_centripetal_shift,\n            br_mask,\n            avg_factor=br_mask.sum())\n        centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0\n\n        return det_loss, off_loss, guiding_loss, centripetal_loss\n\n    def predict_by_feat(self,\n                        tl_heats: List[Tensor],\n                        br_heats: List[Tensor],\n                        tl_offs: List[Tensor],\n                        br_offs: List[Tensor],\n 
                       tl_guiding_shifts: List[Tensor],\n                        br_guiding_shifts: List[Tensor],\n                        tl_centripetal_shifts: List[Tensor],\n                        br_centripetal_shifts: List[Tensor],\n                        batch_img_metas: Optional[List[dict]] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each\n                level with shape (N, guiding_shift_channels, H, W). Useless in\n                this function, we keep this arg because it's the raw output\n                from CentripetalHead.\n            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for\n                each level with shape (N, guiding_shift_channels, H, W).\n                Useless in this function, we keep this arg because it's the\n                raw output from CentripetalHead.\n            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts\n                for each level with shape (N, centripetal_shift_channels, H,\n                W).\n            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal\n                shifts for each level with shape (N,\n                centripetal_shift_channels, H, W).\n            batch_img_metas (list[dict], optional): Batch image meta info.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. 
Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(\n            batch_img_metas)\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            result_list.append(\n                self._predict_by_feat_single(\n                    tl_heats[-1][img_id:img_id + 1, :],\n                    br_heats[-1][img_id:img_id + 1, :],\n                    tl_offs[-1][img_id:img_id + 1, :],\n                    br_offs[-1][img_id:img_id + 1, :],\n                    batch_img_metas[img_id],\n                    tl_emb=None,\n                    br_emb=None,\n                    tl_centripetal_shift=tl_centripetal_shifts[-1][\n                        img_id:img_id + 1, :],\n                    br_centripetal_shift=br_centripetal_shifts[-1][\n                        img_id:img_id + 1, :],\n                    rescale=rescale,\n                    with_nms=with_nms))\n\n        return result_list\n"
  },
  {
    "path": "mmdet/models/dense_heads/condinst_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, kaiming_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import cat_boxes\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..utils import (aligned_bilinear, filter_scores_and_topk, multi_apply,\n                     relative_coordinate_maps, select_single_mlvl)\nfrom ..utils.misc import empty_instances\nfrom .base_mask_head import BaseMaskHead\nfrom .fcos_head import FCOSHead\n\nINF = 1e8\n\n\n@MODELS.register_module()\nclass CondInstBboxHead(FCOSHead):\n    \"\"\"CondInst box head used in https://arxiv.org/abs/1904.02689.\n\n    Note that CondInst Bbox Head is a extension of FCOS head.\n    Two differences are described as follows:\n\n    1. CondInst box head predicts a set of params for each instance.\n    2. CondInst box head return the pos_gt_inds and pos_inds.\n\n    Args:\n        num_params (int): Number of params for instance segmentation.\n    \"\"\"\n\n    def __init__(self, *args, num_params: int = 169, **kwargs) -> None:\n        self.num_params = num_params\n        super().__init__(*args, **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        super()._init_layers()\n        self.controller = nn.Conv2d(\n            self.feat_channels, self.num_params, 3, padding=1)\n\n    def forward_single(self, x: Tensor, scale: Scale,\n                       stride: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps, only\n                used to normalize the bbox prediction when self.norm_on_bbox\n                is True.\n\n        Returns:\n            tuple: scores for each class, bbox predictions, centerness\n            predictions and param predictions of input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, cls_feat, reg_feat = \\\n            super(FCOSHead, self).forward_single(x)\n        if self.centerness_on_reg:\n            centerness = self.conv_centerness(reg_feat)\n        else:\n            centerness = self.conv_centerness(cls_feat)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        if self.norm_on_bbox:\n            # bbox_pred needed for gradient computation has been modified\n            # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace\n            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n            bbox_pred = bbox_pred.clamp(min=0)\n            if not self.training:\n                bbox_pred *= stride\n        else:\n            bbox_pred = bbox_pred.exp()\n        param_pred = self.controller(reg_feat)\n        return cls_score, bbox_pred, centerness, param_pred\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        centernesses: List[Tensor],\n        param_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            centernesses (list[Tensor]): centerness for each scale level, each\n                is a 4D-tensor, the channel number is num_points * 1.\n            param_preds (List[Tensor]): param_pred for each scale level, each\n                is a 4D-tensor, the channel number is num_params.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        # Need stride for rel coord compute\n        all_level_points_strides = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device,\n            with_stride=True)\n        all_level_points = [i[:, :2] for i in all_level_points_strides]\n        all_level_strides = [i[:, 2] for i in all_level_points_strides]\n        labels, bbox_targets, pos_inds_list, pos_gt_inds_list = \\\n            self.get_targets(all_level_points, batch_gt_instances)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and centerness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_centerness = [\n            centerness.permute(0, 2, 3, 1).reshape(-1)\n            for centerness in centernesses\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_centerness = torch.cat(flatten_centerness)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)\n        num_pos = torch.tensor(\n            len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)\n        num_pos = max(reduce_mean(num_pos), 1.0)\n        loss_cls = self.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_centerness = flatten_centerness[pos_inds]\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_centerness_targets = self.centerness_target(pos_bbox_targets)\n        # centerness weighted iou loss\n        centerness_denorm = max(\n            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)\n\n        if len(pos_inds) > 0:\n            pos_points = flatten_points[pos_inds]\n            pos_decoded_bbox_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_preds)\n            pos_decoded_target_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_targets)\n            loss_bbox = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds,\n                weight=pos_centerness_targets,\n                avg_factor=centerness_denorm)\n            loss_centerness = self.loss_centerness(\n                pos_centerness, pos_centerness_targets, avg_factor=num_pos)\n        else:\n            loss_bbox = pos_bbox_preds.sum()\n            loss_centerness = 
pos_centerness.sum()\n\n        self._raw_positive_infos.update(cls_scores=cls_scores)\n        self._raw_positive_infos.update(centernesses=centernesses)\n        self._raw_positive_infos.update(param_preds=param_preds)\n        self._raw_positive_infos.update(all_level_points=all_level_points)\n        self._raw_positive_infos.update(all_level_strides=all_level_strides)\n        self._raw_positive_infos.update(pos_gt_inds_list=pos_gt_inds_list)\n        self._raw_positive_infos.update(pos_inds_list=pos_inds_list)\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            loss_centerness=loss_centerness)\n\n    def get_targets(\n        self, points: List[Tensor], batch_gt_instances: InstanceList\n    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]:\n        \"\"\"Compute regression, classification and centerness targets for points\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple: Targets of each level.\n\n            - concat_lvl_labels (list[Tensor]): Labels of each level.\n            - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \\\n            level.\n            - pos_inds_list (list[Tensor]): pos_inds of each image.\n            - pos_gt_inds_list (List[Tensor]): pos_gt_inds of each image.\n        \"\"\"\n        assert len(points) == len(self.regress_ranges)\n        num_levels = len(points)\n        # expand regress ranges to align with points\n        expanded_regress_ranges = [\n            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n                points[i]) for i in range(num_levels)\n        ]\n        # concat all levels points and regress ranges\n        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n        concat_points = torch.cat(points, dim=0)\n\n        # the number of points per img, per lvl\n        num_points = [center.size(0) for center in points]\n\n        # get labels and bbox_targets of each image\n        labels_list, bbox_targets_list, pos_inds_list, pos_gt_inds_list = \\\n            multi_apply(\n                self._get_targets_single,\n                batch_gt_instances,\n                points=concat_points,\n                regress_ranges=concat_regress_ranges,\n                num_points_per_lvl=num_points)\n\n        # split to per img, per level\n        labels_list = [labels.split(num_points, 0) for labels in labels_list]\n        bbox_targets_list = [\n            bbox_targets.split(num_points, 0)\n            for bbox_targets in bbox_targets_list\n        ]\n\n        # concat per level image\n        concat_lvl_labels = []\n        concat_lvl_bbox_targets = []\n        for i in range(num_levels):\n            concat_lvl_labels.append(\n                torch.cat([labels[i] for labels in labels_list]))\n            bbox_targets = torch.cat(\n                [bbox_targets[i] for bbox_targets in bbox_targets_list])\n            if self.norm_on_bbox:\n                bbox_targets = bbox_targets / self.strides[i]\n            concat_lvl_bbox_targets.append(bbox_targets)\n        return (concat_lvl_labels, concat_lvl_bbox_targets, pos_inds_list,\n                pos_gt_inds_list)\n\n    def _get_targets_single(\n   
     self, gt_instances: InstanceData, points: Tensor,\n        regress_ranges: Tensor, num_points_per_lvl: List[int]\n    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n        \"\"\"Compute regression and classification targets for a single image.\"\"\"\n        num_points = points.size(0)\n        num_gts = len(gt_instances)\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        gt_masks = gt_instances.get('masks', None)\n\n        if num_gts == 0:\n            return gt_labels.new_full((num_points,), self.num_classes), \\\n                   gt_bboxes.new_zeros((num_points, 4)), \\\n                   gt_bboxes.new_zeros((0,), dtype=torch.int64), \\\n                   gt_bboxes.new_zeros((0,), dtype=torch.int64)\n\n        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n            gt_bboxes[:, 3] - gt_bboxes[:, 1])\n        # TODO: figure out why these two are different\n        # areas = areas[None].expand(num_points, num_gts)\n        areas = areas[None].repeat(num_points, 1)\n        regress_ranges = regress_ranges[:, None, :].expand(\n            num_points, num_gts, 2)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        xs, ys = points[:, 0], points[:, 1]\n        xs = xs[:, None].expand(num_points, num_gts)\n        ys = ys[:, None].expand(num_points, num_gts)\n\n        left = xs - gt_bboxes[..., 0]\n        right = gt_bboxes[..., 2] - xs\n        top = ys - gt_bboxes[..., 1]\n        bottom = gt_bboxes[..., 3] - ys\n        bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n        if self.center_sampling:\n            # condition1: inside a `center bbox`\n            radius = self.center_sample_radius\n            # if gt_mask not None, use gt mask's centroid to determine\n            # the center region rather than gt_bbox center\n            if gt_masks is None:\n                center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2\n                center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2\n            else:\n                h, w = gt_masks.height, gt_masks.width\n                masks = gt_masks.to_tensor(\n                    dtype=torch.bool, device=gt_bboxes.device)\n                yys = torch.arange(\n                    0, h, dtype=torch.float32, device=masks.device)\n                xxs = torch.arange(\n                    0, w, dtype=torch.float32, device=masks.device)\n                # m00/m10/m01 represent the moments of a contour\n                # centroid is computed by m00/m10 and m00/m01\n                m00 = masks.sum(dim=-1).sum(dim=-1).clamp(min=1e-6)\n                m10 = (masks * xxs).sum(dim=-1).sum(dim=-1)\n                m01 = (masks * yys[:, None]).sum(dim=-1).sum(dim=-1)\n                center_xs = m10 / m00\n                center_ys = m01 / m00\n\n                center_xs = center_xs[None].expand(num_points, num_gts)\n                center_ys = center_ys[None].expand(num_points, num_gts)\n            center_gts = torch.zeros_like(gt_bboxes)\n            stride = center_xs.new_zeros(center_xs.shape)\n\n            # project the points on current lvl back to the `original` sizes\n            lvl_begin = 0\n            for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):\n                lvl_end = lvl_begin + num_points_lvl\n                stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius\n                lvl_begin = lvl_end\n\n            x_mins = center_xs - stride\n            y_mins = center_ys - stride\n            x_maxs = 
center_xs + stride\n            y_maxs = center_ys + stride\n            center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],\n                                             x_mins, gt_bboxes[..., 0])\n            center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],\n                                             y_mins, gt_bboxes[..., 1])\n            center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],\n                                             gt_bboxes[..., 2], x_maxs)\n            center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],\n                                             gt_bboxes[..., 3], y_maxs)\n\n            cb_dist_left = xs - center_gts[..., 0]\n            cb_dist_right = center_gts[..., 2] - xs\n            cb_dist_top = ys - center_gts[..., 1]\n            cb_dist_bottom = center_gts[..., 3] - ys\n            center_bbox = torch.stack(\n                (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)\n            inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0\n        else:\n            # condition1: inside a gt bbox\n            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n\n        # condition2: limit the regression range for each location\n        max_regress_distance = bbox_targets.max(-1)[0]\n        inside_regress_range = (\n            (max_regress_distance >= regress_ranges[..., 0])\n            & (max_regress_distance <= regress_ranges[..., 1]))\n\n        # if there are still more than one objects for a location,\n        # we choose the one with minimal area\n        areas[inside_gt_bbox_mask == 0] = INF\n        areas[inside_regress_range == 0] = INF\n        min_area, min_area_inds = areas.min(dim=1)\n\n        labels = gt_labels[min_area_inds]\n        labels[min_area == INF] = self.num_classes  # set as BG\n        bbox_targets = bbox_targets[range(num_points), min_area_inds]\n\n        # return pos_inds & pos_gt_inds\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().reshape(-1)\n        pos_gt_inds = min_area_inds[labels < self.num_classes]\n        return labels, bbox_targets, pos_inds, pos_gt_inds\n\n    def get_positive_infos(self) -> InstanceList:\n        \"\"\"Get positive information from sampling results.\n\n        Returns:\n            list[:obj:`InstanceData`]: Positive information of each image,\n            usually including positive bboxes, positive labels, positive\n            priors, etc.\n        \"\"\"\n        assert len(self._raw_positive_infos) > 0\n\n        pos_gt_inds_list = self._raw_positive_infos['pos_gt_inds_list']\n        pos_inds_list = self._raw_positive_infos['pos_inds_list']\n        num_imgs = len(pos_gt_inds_list)\n\n        cls_score_list = []\n        centerness_list = []\n        param_pred_list = []\n        point_list = []\n        stride_list = []\n        for cls_score_per_lvl, centerness_per_lvl, param_pred_per_lvl,\\\n            point_per_lvl, stride_per_lvl in \\\n            zip(self._raw_positive_infos['cls_scores'],\n                self._raw_positive_infos['centernesses'],\n                self._raw_positive_infos['param_preds'],\n                self._raw_positive_infos['all_level_points'],\n                self._raw_positive_infos['all_level_strides']):\n            cls_score_per_lvl = \\\n                cls_score_per_lvl.permute(\n                    0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)\n            centerness_per_lvl = \\\n                
centerness_per_lvl.permute(\n                    0, 2, 3, 1).reshape(num_imgs, -1, 1)\n            param_pred_per_lvl = \\\n                param_pred_per_lvl.permute(\n                    0, 2, 3, 1).reshape(num_imgs, -1, self.num_params)\n            point_per_lvl = point_per_lvl.unsqueeze(0).repeat(num_imgs, 1, 1)\n            stride_per_lvl = stride_per_lvl.unsqueeze(0).repeat(num_imgs, 1)\n\n            cls_score_list.append(cls_score_per_lvl)\n            centerness_list.append(centerness_per_lvl)\n            param_pred_list.append(param_pred_per_lvl)\n            point_list.append(point_per_lvl)\n            stride_list.append(stride_per_lvl)\n        cls_scores = torch.cat(cls_score_list, dim=1)\n        centernesses = torch.cat(centerness_list, dim=1)\n        param_preds = torch.cat(param_pred_list, dim=1)\n        all_points = torch.cat(point_list, dim=1)\n        all_strides = torch.cat(stride_list, dim=1)\n\n        positive_infos = []\n        for i, (pos_gt_inds,\n                pos_inds) in enumerate(zip(pos_gt_inds_list, pos_inds_list)):\n            pos_info = InstanceData()\n            pos_info.points = all_points[i][pos_inds]\n            pos_info.strides = all_strides[i][pos_inds]\n            pos_info.scores = cls_scores[i][pos_inds]\n            pos_info.centernesses = centernesses[i][pos_inds]\n            pos_info.param_preds = param_preds[i][pos_inds]\n            pos_info.pos_assigned_gt_inds = pos_gt_inds\n            pos_info.pos_inds = pos_inds\n            positive_infos.append(pos_info)\n        return positive_infos\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        score_factors: Optional[List[Tensor]] = None,\n                        param_preds: Optional[List[Tensor]] = None,\n                        batch_img_metas: Optional[List[dict]] = None,\n                        cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Note: When score_factors is not None, the cls_scores are\n        usually multiplied by it then obtain the real score used in NMS,\n        such as CenterNess in FCOS, IoU branch in ATSS.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            score_factors (list[Tensor], optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 1, H, W). 
Defaults to None.\n            param_preds (list[Tensor], optional): Params for all scale\n                level, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_params, H, W)\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        if score_factors is None:\n            # e.g. Retina, FreeAnchor, Foveabox, etc.\n            with_score_factors = False\n        else:\n            # e.g. FCOS, PAA, ATSS, AutoAssign, etc.\n            with_score_factors = True\n            assert len(cls_scores) == len(score_factors)\n\n        num_levels = len(cls_scores)\n\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        all_level_points_strides = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device,\n            with_stride=True)\n        all_level_points = [i[:, :2] for i in all_level_points_strides]\n        all_level_strides = [i[:, 2] for i in all_level_points_strides]\n\n        result_list = []\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            cls_score_list = select_single_mlvl(\n                cls_scores, img_id, detach=True)\n            bbox_pred_list = select_single_mlvl(\n                bbox_preds, img_id, detach=True)\n            if with_score_factors:\n                score_factor_list = select_single_mlvl(\n                    score_factors, img_id, detach=True)\n            else:\n                score_factor_list = [None for _ in range(num_levels)]\n            param_pred_list = select_single_mlvl(\n                param_preds, img_id, detach=True)\n\n            results = self._predict_by_feat_single(\n                cls_score_list=cls_score_list,\n                bbox_pred_list=bbox_pred_list,\n                score_factor_list=score_factor_list,\n                param_pred_list=param_pred_list,\n                mlvl_points=all_level_points,\n                mlvl_strides=all_level_strides,\n                img_meta=img_meta,\n                cfg=cfg,\n                rescale=rescale,\n                with_nms=with_nms)\n            result_list.append(results)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                               
 param_pred_list: List[Tensor],\n                                mlvl_points: List[Tensor],\n                                mlvl_strides: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            param_pred_list (List[Tensor]): Param predition from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_params, H, W).\n            mlvl_points (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid.\n                It has shape (num_priors, 2)\n            mlvl_strides (List[Tensor]):  Each element in the list is\n                the stride of a single level in feature pyramid.\n                It has shape (num_priors, 1)\n            img_meta (dict): Image meta info.\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        if score_factor_list[0] is None:\n            # e.g. Retina, FreeAnchor, etc.\n            with_score_factors = False\n        else:\n            # e.g. 
FCOS, PAA, ATSS, etc.\n            with_score_factors = True\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_param_preds = []\n        mlvl_valid_points = []\n        mlvl_valid_strides = []\n        mlvl_scores = []\n        mlvl_labels = []\n        if with_score_factors:\n            mlvl_score_factors = []\n        else:\n            mlvl_score_factors = None\n        for level_idx, (cls_score, bbox_pred, score_factor,\n                        param_pred, points, strides) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, param_pred_list,\n                              mlvl_points, mlvl_strides)):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            dim = self.bbox_coder.encode_size\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)\n            if with_score_factors:\n                score_factor = score_factor.permute(1, 2,\n                                                    0).reshape(-1).sigmoid()\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                # remind that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            param_pred = param_pred.permute(1, 2,\n                                            0).reshape(-1, self.num_params)\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            score_thr = cfg.get('score_thr', 0)\n\n            results = filter_scores_and_topk(\n                scores, score_thr, nms_pre,\n                dict(\n                    bbox_pred=bbox_pred,\n                    param_pred=param_pred,\n                    points=points,\n                    strides=strides))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            param_pred = filtered_results['param_pred']\n            points = filtered_results['points']\n            strides = filtered_results['strides']\n\n            if with_score_factors:\n                score_factor = score_factor[keep_idxs]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_param_preds.append(param_pred)\n            mlvl_valid_points.append(points)\n            mlvl_valid_strides.append(strides)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n            if with_score_factors:\n                mlvl_score_factors.append(score_factor)\n\n        bbox_pred = torch.cat(mlvl_bbox_preds)\n        priors = cat_boxes(mlvl_valid_points)\n        bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n        results.param_preds = torch.cat(mlvl_param_preds)\n        results.points = torch.cat(mlvl_valid_points)\n        results.strides = torch.cat(mlvl_valid_strides)\n        if with_score_factors:\n            results.score_factors = torch.cat(mlvl_score_factors)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n\nclass MaskFeatModule(BaseModule):\n    \"\"\"CondInst mask feature map branch used in \\\n    https://arxiv.org/abs/1904.02689.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels of the mask feature\n             map branch.\n        start_level (int): The starting feature map level from RPN that\n             will be used to predict the mask feature map.\n        end_level (int): The ending feature map level from rpn that\n             will be used to predict the mask feature map.\n        out_channels (int): Number of output channels of the mask feature\n             map branch. This is the channel count of the mask\n             feature map that to be dynamically convolved with the predicted\n             kernel.\n        mask_stride (int): Downsample factor of the mask feature map output.\n            Defaults to 4.\n        num_stacked_convs (int): Number of convs in mask feature branch.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 feat_channels: int,\n                 start_level: int,\n                 end_level: int,\n                 out_channels: int,\n                 mask_stride: int = 4,\n                 num_stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 init_cfg: MultiConfig = [\n                     dict(type='Normal', layer='Conv2d', std=0.01)\n                 ],\n                 **kwargs) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.start_level = start_level\n        self.end_level = end_level\n        self.mask_stride = mask_stride\n        self.num_stacked_convs = num_stacked_convs\n        assert start_level >= 0 and end_level >= start_level\n        self.out_channels = out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.convs_all_levels = nn.ModuleList()\n        for i in range(self.start_level, self.end_level + 1):\n            convs_per_level = nn.Sequential()\n            convs_per_level.add_module(\n                f'conv{i}',\n                ConvModule(\n                    self.in_channels,\n                    self.feat_channels,\n                    3,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    inplace=False,\n                    bias=False))\n            self.convs_all_levels.append(convs_per_level)\n\n        conv_branch = []\n        for _ in range(self.num_stacked_convs):\n            conv_branch.append(\n                ConvModule(\n                    self.feat_channels,\n                    self.feat_channels,\n                    3,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=False))\n        self.conv_branch = nn.Sequential(*conv_branch)\n\n        self.conv_pred = nn.Conv2d(\n            self.feat_channels, self.out_channels, 1, stride=1)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        super().init_weights()\n        kaiming_init(self.convs_all_levels, a=1, distribution='uniform')\n        kaiming_init(self.conv_branch, a=1, distribution='uniform')\n        kaiming_init(self.conv_pred, a=1, distribution='uniform')\n\n    def forward(self, x: Tuple[Tensor]) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            Tensor: The predicted mask feature map.\n        \"\"\"\n        inputs = x[self.start_level:self.end_level + 1]\n        assert len(inputs) == (self.end_level - self.start_level + 1)\n        feature_add_all_level = self.convs_all_levels[0](inputs[0])\n        target_h, target_w = feature_add_all_level.size()[2:]\n        for i in range(1, len(inputs)):\n            input_p = inputs[i]\n            x_p = self.convs_all_levels[i](input_p)\n            h, w = x_p.size()[2:]\n            factor_h = target_h // h\n    
        factor_w = target_w // w\n            assert factor_h == factor_w\n            feature_per_level = aligned_bilinear(x_p, factor_h)\n            feature_add_all_level = feature_add_all_level + \\\n                feature_per_level\n\n        feature_add_all_level = self.conv_branch(feature_add_all_level)\n        feature_pred = self.conv_pred(feature_add_all_level)\n        return feature_pred\n\n\n@MODELS.register_module()\nclass CondInstMaskHead(BaseMaskHead):\n    \"\"\"CondInst mask head used in https://arxiv.org/abs/1904.02689.\n\n    This head outputs the mask for CondInst.\n\n    Args:\n        mask_feature_head (dict): Config of CondInstMaskFeatHead.\n        num_layers (int): Number of dynamic conv layers.\n        feat_channels (int): Number of channels in the dynamic conv.\n        mask_out_stride (int): The stride of the mask feat.\n        size_of_interest (int): The size of the region used in rel coord.\n        max_masks_to_train (int): Maximum number of masks to train for\n            each image.\n        loss_segm (:obj:`ConfigDict` or dict, optional): Config of\n            segmentation loss.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config\n            of head.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            head.\n    \"\"\"\n\n    def __init__(self,\n                 mask_feature_head: ConfigType,\n                 num_layers: int = 3,\n                 feat_channels: int = 8,\n                 mask_out_stride: int = 4,\n                 size_of_interest: int = 8,\n                 max_masks_to_train: int = -1,\n                 topk_masks_per_img: int = -1,\n                 loss_mask: ConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None) -> None:\n        super().__init__()\n        self.mask_feature_head = MaskFeatModule(**mask_feature_head)\n        self.mask_feat_stride = self.mask_feature_head.mask_stride\n        self.in_channels = self.mask_feature_head.out_channels\n        self.num_layers = num_layers\n        self.feat_channels = feat_channels\n        self.size_of_interest = size_of_interest\n        self.mask_out_stride = mask_out_stride\n        self.max_masks_to_train = max_masks_to_train\n        self.topk_masks_per_img = topk_masks_per_img\n        self.prior_generator = MlvlPointGenerator([self.mask_feat_stride])\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.loss_mask = MODELS.build(loss_mask)\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        weight_nums, bias_nums = [], []\n        for i in range(self.num_layers):\n            if i == 0:\n                weight_nums.append((self.in_channels + 2) * self.feat_channels)\n                bias_nums.append(self.feat_channels)\n            elif i == self.num_layers - 1:\n                weight_nums.append(self.feat_channels * 1)\n                bias_nums.append(1)\n            else:\n                weight_nums.append(self.feat_channels * self.feat_channels)\n                bias_nums.append(self.feat_channels)\n\n        self.weight_nums = weight_nums\n        self.bias_nums = bias_nums\n        self.num_params = sum(weight_nums) + sum(bias_nums)\n\n    def parse_dynamic_params(\n            self, params: Tensor) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"parse the dynamic params for dynamic conv.\"\"\"\n        num_insts = params.size(0)\n    
    params_splits = list(\n            torch.split_with_sizes(\n                params, self.weight_nums + self.bias_nums, dim=1))\n        weight_splits = params_splits[:self.num_layers]\n        bias_splits = params_splits[self.num_layers:]\n        for i in range(self.num_layers):\n            if i < self.num_layers - 1:\n                weight_splits[i] = weight_splits[i].reshape(\n                    num_insts * self.in_channels, -1, 1, 1)\n                bias_splits[i] = bias_splits[i].reshape(num_insts *\n                                                        self.in_channels)\n            else:\n                # out_channels x in_channels x 1 x 1\n                weight_splits[i] = weight_splits[i].reshape(\n                    num_insts * 1, -1, 1, 1)\n                bias_splits[i] = bias_splits[i].reshape(num_insts)\n\n        return weight_splits, bias_splits\n\n    def dynamic_conv_forward(self, features: Tensor, weights: List[Tensor],\n                             biases: List[Tensor], num_insts: int) -> Tensor:\n        \"\"\"dynamic forward, each layer follow a relu.\"\"\"\n        n_layers = len(weights)\n        x = features\n        for i, (w, b) in enumerate(zip(weights, biases)):\n            x = F.conv2d(x, w, bias=b, stride=1, padding=0, groups=num_insts)\n            if i < n_layers - 1:\n                x = F.relu(x)\n        return x\n\n    def forward(self, x: tuple, positive_infos: InstanceList) -> tuple:\n        \"\"\"Forward feature from the upstream network to get prototypes and\n        linearly combine the prototypes, using masks coefficients, into\n        instance masks. Finally, crop the instance masks with given bboxes.\n\n        Args:\n            x (Tuple[Tensor]): Feature from the upstream network, which is\n                a 4D-tensor.\n            positive_infos (List[:obj:``InstanceData``]): Positive information\n                that calculate from detect head.\n\n        Returns:\n            tuple: Predicted instance segmentation masks\n        \"\"\"\n        mask_feats = self.mask_feature_head(x)\n        return multi_apply(self.forward_single, mask_feats, positive_infos)\n\n    def forward_single(self, mask_feat: Tensor,\n                       positive_info: InstanceData) -> Tensor:\n        \"\"\"Forward features of a each image.\"\"\"\n        pos_param_preds = positive_info.get('param_preds')\n        pos_points = positive_info.get('points')\n        pos_strides = positive_info.get('strides')\n\n        num_inst = pos_param_preds.shape[0]\n        mask_feat = mask_feat[None].repeat(num_inst, 1, 1, 1)\n        _, _, H, W = mask_feat.size()\n        if num_inst == 0:\n            return (pos_param_preds.new_zeros((0, 1, H, W)), )\n\n        locations = self.prior_generator.single_level_grid_priors(\n            mask_feat.size()[2:], 0, device=mask_feat.device)\n\n        rel_coords = relative_coordinate_maps(locations, pos_points,\n                                              pos_strides,\n                                              self.size_of_interest,\n                                              mask_feat.size()[2:])\n        mask_head_inputs = torch.cat([rel_coords, mask_feat], dim=1)\n        mask_head_inputs = mask_head_inputs.reshape(1, -1, H, W)\n\n        weights, biases = self.parse_dynamic_params(pos_param_preds)\n        mask_preds = self.dynamic_conv_forward(mask_head_inputs, weights,\n                                               biases, num_inst)\n        mask_preds = mask_preds.reshape(-1, H, W)\n        
mask_preds = aligned_bilinear(\n            mask_preds.unsqueeze(0),\n            int(self.mask_feat_stride / self.mask_out_stride)).squeeze(0)\n\n        return (mask_preds, )\n\n    def loss_by_feat(self, mask_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], positive_infos: InstanceList,\n                     **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mask_preds (list[Tensor]): List of predicted masks, each has\n                shape (num_classes, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n            positive_infos (List[:obj:``InstanceData``]): Information of\n                positive samples of each image that are assigned in detection\n                head.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert positive_infos is not None, \\\n            'positive_infos should not be None in `CondInstMaskHead`'\n        losses = dict()\n\n        loss_mask = 0.\n        num_imgs = len(mask_preds)\n        total_pos = 0\n\n        for idx in range(num_imgs):\n            (mask_pred, pos_mask_targets, num_pos) = \\\n                self._get_targets_single(\n                mask_preds[idx], batch_gt_instances[idx],\n                positive_infos[idx])\n            # mask loss\n            total_pos += num_pos\n            if num_pos == 0 or pos_mask_targets is None:\n                loss = mask_pred.new_zeros(1).mean()\n            else:\n                loss = self.loss_mask(\n                    mask_pred, pos_mask_targets,\n                    reduction_override='none').sum()\n            loss_mask += loss\n\n        if total_pos == 0:\n            total_pos += 1  # avoid nan\n        loss_mask = loss_mask / total_pos\n        losses.update(loss_mask=loss_mask)\n        return losses\n\n    def _get_targets_single(self, mask_preds: Tensor,\n                            gt_instances: InstanceData,\n                            positive_info: InstanceData):\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            mask_preds (Tensor): Predicted prototypes with shape\n                (num_classes, H, W).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            positive_info (:obj:`InstanceData`): Information of positive\n                samples that are assigned in detection head. 
It usually\n                contains following keys.\n\n                    - pos_assigned_gt_inds (Tensor): Assigner GT indexes of\n                      positive proposals, has shape (num_pos, )\n                    - pos_inds (Tensor): Positive index of image, has\n                      shape (num_pos, ).\n                    - param_pred (Tensor): Positive param preditions\n                      with shape (num_pos, num_params).\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n            - mask_preds (Tensor): Positive predicted mask with shape\n              (num_pos, mask_h, mask_w).\n            - pos_mask_targets (Tensor): Positive mask targets with shape\n              (num_pos, mask_h, mask_w).\n            - num_pos (int): Positive numbers.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        device = gt_bboxes.device\n        gt_masks = gt_instances.masks.to_tensor(\n            dtype=torch.bool, device=device).float()\n\n        # process with mask targets\n        pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds')\n        scores = positive_info.get('scores')\n        centernesses = positive_info.get('centernesses')\n        num_pos = pos_assigned_gt_inds.size(0)\n\n        if gt_masks.size(0) == 0 or num_pos == 0:\n            return mask_preds, None, 0\n        # Since we're producing (near) full image masks,\n        # it'd take too much vram to backprop on every single mask.\n        # Thus we select only a subset.\n        if (self.max_masks_to_train != -1) and \\\n           (num_pos > self.max_masks_to_train):\n            perm = torch.randperm(num_pos)\n            select = perm[:self.max_masks_to_train]\n            mask_preds = mask_preds[select]\n            pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n            num_pos = self.max_masks_to_train\n        elif self.topk_masks_per_img != -1:\n            unique_gt_inds = pos_assigned_gt_inds.unique()\n            num_inst_per_gt = max(\n                int(self.topk_masks_per_img / len(unique_gt_inds)), 1)\n\n            keep_mask_preds = []\n            keep_pos_assigned_gt_inds = []\n            for gt_ind in unique_gt_inds:\n                per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind)\n                mask_preds_per_inst = mask_preds[per_inst_pos_inds]\n                gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds]\n                if sum(per_inst_pos_inds) > num_inst_per_gt:\n                    per_inst_scores = scores[per_inst_pos_inds].sigmoid().max(\n                        dim=1)[0]\n                    per_inst_centerness = centernesses[\n                        per_inst_pos_inds].sigmoid().reshape(-1, )\n                    select = (per_inst_scores * per_inst_centerness).topk(\n                        k=num_inst_per_gt, dim=0)[1]\n                    mask_preds_per_inst = mask_preds_per_inst[select]\n                    gt_inds_per_inst = gt_inds_per_inst[select]\n                keep_mask_preds.append(mask_preds_per_inst)\n                keep_pos_assigned_gt_inds.append(gt_inds_per_inst)\n            mask_preds = torch.cat(keep_mask_preds)\n            pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds)\n            num_pos = pos_assigned_gt_inds.size(0)\n\n        # Follow the origin implement\n        start = int(self.mask_out_stride // 2)\n        gt_masks = gt_masks[:, start::self.mask_out_stride,\n                            start::self.mask_out_stride]\n        gt_masks = 
gt_masks.gt(0.5).float()\n        pos_mask_targets = gt_masks[pos_assigned_gt_inds]\n\n        return (mask_preds, pos_mask_targets, num_pos)\n\n    def predict_by_feat(self,\n                        mask_preds: List[Tensor],\n                        results_list: InstanceList,\n                        batch_img_metas: List[dict],\n                        rescale: bool = True,\n                        **kwargs) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mask_preds (list[Tensor]): Predicted prototypes with shape\n                (num_classes, H, W).\n            results_list (List[:obj:``InstanceData``]): BBoxHead results.\n            batch_img_metas (list[dict]): Meta information of all images.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        assert len(mask_preds) == len(results_list) == len(batch_img_metas)\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            results = results_list[img_id]\n            bboxes = results.bboxes\n            mask_pred = mask_preds[img_id]\n            if bboxes.shape[0] == 0 or mask_pred.shape[0] == 0:\n                results_list[img_id] = empty_instances(\n                    [img_meta],\n                    bboxes.device,\n                    task_type='mask',\n                    instance_results=[results])[0]\n            else:\n                im_mask = self._predict_by_feat_single(\n                    mask_preds=mask_pred,\n                    bboxes=bboxes,\n                    img_meta=img_meta,\n                    rescale=rescale)\n                results.masks = im_mask\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                mask_preds: Tensor,\n                                bboxes: Tensor,\n                                img_meta: dict,\n                                rescale: bool,\n                                cfg: OptConfigType = None):\n        \"\"\"Transform a single image's features extracted from the head into\n        mask results.\n\n        Args:\n            mask_preds (Tensor): Predicted prototypes, has shape [H, W, N].\n            img_meta (dict): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If rescale is False, then returned masks will\n                fit the scale of imgs[0].\n            cfg (dict, optional): Config used in test phase.\n                Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        
\"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n            (1, 2))\n        img_h, img_w = img_meta['img_shape'][:2]\n        ori_h, ori_w = img_meta['ori_shape'][:2]\n\n        mask_preds = mask_preds.sigmoid().unsqueeze(0)\n        mask_preds = aligned_bilinear(mask_preds, self.mask_out_stride)\n        mask_preds = mask_preds[:, :, :img_h, :img_w]\n        if rescale:  # in-placed rescale the bboxes\n            scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n            bboxes /= scale_factor\n\n            masks = F.interpolate(\n                mask_preds, (ori_h, ori_w),\n                mode='bilinear',\n                align_corners=False).squeeze(0) > cfg.mask_thr\n        else:\n            masks = mask_preds.squeeze(0) > cfg.mask_thr\n\n        return masks\n"
  },
  {
    "path": "mmdet/models/dense_heads/conditional_detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.model import bias_init_with_prob\nfrom torch import Tensor\n\nfrom mmdet.models.layers.transformer import inverse_sigmoid\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList\nfrom .detr_head import DETRHead\n\n\n@MODELS.register_module()\nclass ConditionalDETRHead(DETRHead):\n    \"\"\"Head of Conditional DETR. Conditional DETR: Conditional DETR for Fast\n    Training Convergence. More details can be found in the `paper.\n\n    <https://arxiv.org/abs/2108.06152>`_ .\n    \"\"\"\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the transformer head.\"\"\"\n        super().init_weights()\n        # The initialization below for transformer head is very\n        # important as we use Focal_loss for loss_cls\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            nn.init.constant_(self.fc_cls.bias, bias_init)\n\n    def forward(self, hidden_states: Tensor,\n                references: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"\"Forward function.\n\n        Args:\n            hidden_states (Tensor): Features from transformer decoder. If\n                `return_intermediate_dec` is True output has shape\n                (num_decoder_layers, bs, num_queries, dim), else has shape (1,\n                bs, num_queries, dim) which only contains the last layer\n                outputs.\n            references (Tensor): References from transformer decoder, has\n                shape (bs, num_queries, 2).\n        Returns:\n            tuple[Tensor]: results of head containing the following tensor.\n\n            - layers_cls_scores (Tensor): Outputs from the classification head,\n              shape (num_decoder_layers, bs, num_queries, cls_out_channels).\n              Note cls_out_channels should include background.\n            - layers_bbox_preds (Tensor): Sigmoid outputs from the regression\n              head with normalized coordinate format (cx, cy, w, h), has shape\n              (num_decoder_layers, bs, num_queries, 4).\n        \"\"\"\n\n        references_unsigmoid = inverse_sigmoid(references)\n        layers_bbox_preds = []\n        for layer_id in range(hidden_states.shape[0]):\n            tmp_reg_preds = self.fc_reg(\n                self.activate(self.reg_ffn(hidden_states[layer_id])))\n            tmp_reg_preds[..., :2] += references_unsigmoid\n            outputs_coord = tmp_reg_preds.sigmoid()\n            layers_bbox_preds.append(outputs_coord)\n        layers_bbox_preds = torch.stack(layers_bbox_preds)\n\n        layers_cls_scores = self.fc_cls(hidden_states)\n        return layers_cls_scores, layers_bbox_preds\n\n    def loss(self, hidden_states: Tensor, references: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            hidden_states (Tensor): Features from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, dim).\n            references (Tensor): References from the transformer decoder, has\n               shape (num_decoder_layers, bs, num_queries, 2).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states, references)\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*loss_inputs)\n        return losses\n\n    def loss_and_predict(\n            self, hidden_states: Tensor, references: Tensor,\n            batch_data_samples: SampleList) -> Tuple[dict, InstanceList]:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples. Over-write because\n        img_metas are needed as inputs for bbox_head.\n\n        Args:\n            hidden_states (Tensor): Features from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, dim).\n            references (Tensor): References from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, 2).\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple: The return value is a tuple contains:\n\n            - losses: (dict[str, Tensor]): A dictionary of loss components.\n            - predictions (list[:obj:`InstanceData`]): Detection\n              results of each image after the post process.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states, references)\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*loss_inputs)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas)\n        return losses, predictions\n\n    def predict(self,\n                hidden_states: Tensor,\n                references: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network. Over-write\n        because img_metas are needed as inputs for bbox_head.\n\n        Args:\n            hidden_states (Tensor): Features from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, dim).\n            references (Tensor): References from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, 2).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        last_layer_hidden_state = hidden_states[-1].unsqueeze(0)\n        outs = self(last_layer_hidden_state, references)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n\n        return predictions\n"
  },
  {
    "path": "mmdet/models/dense_heads/corner_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom logging import warning\nfrom math import ceil, log\nfrom typing import List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import CornerPool, batched_nms\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, bias_init_with_prob\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, OptMultiConfig)\nfrom ..utils import (gather_feat, gaussian_radius, gen_gaussian_target,\n                     get_local_maximum, get_topk_from_heatmap, multi_apply,\n                     transpose_and_gather_feat)\nfrom .base_dense_head import BaseDenseHead\n\n\nclass BiCornerPool(BaseModule):\n    \"\"\"Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)\n\n    Args:\n        in_channels (int): Input channels of module.\n        directions (list[str]): Directions of two CornerPools.\n        out_channels (int): Output channels of module.\n        feat_channels (int): Feature channels of module.\n        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct\n            and config norm layer.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to\n            control the initialization.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 directions: List[int],\n                 feat_channels: int = 128,\n                 out_channels: int = 128,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg)\n        self.direction1_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n        self.direction2_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.aftpool_conv = ConvModule(\n            feat_channels,\n            out_channels,\n            3,\n            padding=1,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.conv1 = ConvModule(\n            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n        self.conv2 = ConvModule(\n            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.direction1_pool = CornerPool(directions[0])\n        self.direction2_pool = CornerPool(directions[1])\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tensor): Input feature of BiCornerPool.\n\n        Returns:\n            conv2 (tensor): Output feature of BiCornerPool.\n        \"\"\"\n        direction1_conv = self.direction1_conv(x)\n        direction2_conv = self.direction2_conv(x)\n        direction1_feat = self.direction1_pool(direction1_conv)\n        direction2_feat = self.direction2_pool(direction2_conv)\n        aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)\n        conv1 = self.conv1(x)\n        relu = self.relu(aftpool_conv + conv1)\n        conv2 = self.conv2(relu)\n        return conv2\n\n\n@MODELS.register_module()\nclass CornerHead(BaseDenseHead):\n    \"\"\"Head of CornerNet: Detecting Objects as Paired Keypoints.\n\n    Code is modified from the `official github repo\n    
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/\n    kp.py#L73>`_ .\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1808.01244>`_ .\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        num_feat_levels (int): Levels of feature from the previous module.\n            2 for HourglassNet-104 and 1 for HourglassNet-52. Because\n            HourglassNet-104 outputs the final feature and intermediate\n            supervision feature and HourglassNet-52 only outputs the final\n            feature. Defaults to 2.\n        corner_emb_channels (int): Channel of embedding vector. Defaults to 1.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config.\n            Useless in CornerHead, but we keep this variable for\n            SingleStageDetector.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            CornerHead.\n        loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap\n            loss. Defaults to GaussianFocalLoss.\n        loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding\n            loss. Defaults to AssociativeEmbeddingLoss.\n        loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss.\n            Defaults to SmoothL1Loss.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 num_feat_levels: int = 2,\n                 corner_emb_channels: int = 1,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 loss_heatmap: ConfigType = dict(\n                     type='GaussianFocalLoss',\n                     alpha=2.0,\n                     gamma=4.0,\n                     loss_weight=1),\n                 loss_embedding: ConfigType = dict(\n                     type='AssociativeEmbeddingLoss',\n                     pull_weight=0.25,\n                     push_weight=0.25),\n                 loss_offset: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1),\n                 init_cfg: OptMultiConfig = None) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.corner_emb_channels = corner_emb_channels\n        self.with_corner_emb = self.corner_emb_channels > 0\n        self.corner_offset_channels = 2\n        self.num_feat_levels = num_feat_levels\n        self.loss_heatmap = MODELS.build(\n            loss_heatmap) if loss_heatmap is not None else None\n        self.loss_embedding = MODELS.build(\n            loss_embedding) if loss_embedding is not None else None\n        self.loss_offset = MODELS.build(\n            loss_offset) if loss_offset is not None else None\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        self._init_layers()\n\n    def _make_layers(self,\n                     out_channels: int,\n                     in_channels: int = 256,\n                     feat_channels: int = 256) -> nn.Sequential:\n        \"\"\"Initialize conv 
sequential for CornerHead.\"\"\"\n        return nn.Sequential(\n            ConvModule(in_channels, feat_channels, 3, padding=1),\n            ConvModule(\n                feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))\n\n    def _init_corner_kpt_layers(self) -> None:\n        \"\"\"Initialize corner keypoint layers.\n\n        Including corner heatmap branch and corner offset branch. Each branch\n        has two parts: prefix `tl_` for top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()\n        self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()\n        self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_pool.append(\n                BiCornerPool(\n                    self.in_channels, ['top', 'left'],\n                    out_channels=self.in_channels))\n            self.br_pool.append(\n                BiCornerPool(\n                    self.in_channels, ['bottom', 'right'],\n                    out_channels=self.in_channels))\n\n            self.tl_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n            self.br_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n\n            self.tl_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n            self.br_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n\n    def _init_corner_emb_layers(self) -> None:\n        \"\"\"Initialize corner embedding layers.\n\n        Only include corner embedding branch with two parts: prefix `tl_` for\n        top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_emb.append(\n                self._make_layers(\n                    out_channels=self.corner_emb_channels,\n                    in_channels=self.in_channels))\n            self.br_emb.append(\n                self._make_layers(\n                    out_channels=self.corner_emb_channels,\n                    in_channels=self.in_channels))\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers for CornerHead.\n\n        Including two parts: corner keypoint layers and corner embedding layers\n        \"\"\"\n        self._init_corner_kpt_layers()\n        if self.with_corner_emb:\n            self._init_corner_emb_layers()\n\n    def init_weights(self) -> None:\n        super().init_weights()\n        bias_init = bias_init_with_prob(0.1)\n        for i in range(self.num_feat_levels):\n            # The initialization of parameters are different between\n            # nn.Conv2d and ConvModule. 
Our experiments show that\n            # using the original initialization of nn.Conv2d increases\n            # the final mAP by about 0.2%\n            self.tl_heat[i][-1].conv.reset_parameters()\n            self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.br_heat[i][-1].conv.reset_parameters()\n            self.br_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.tl_off[i][-1].conv.reset_parameters()\n            self.br_off[i][-1].conv.reset_parameters()\n            if self.with_corner_emb:\n                self.tl_emb[i][-1].conv.reset_parameters()\n                self.br_emb[i][-1].conv.reset_parameters()\n\n    def forward(self, feats: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of corner heatmaps, offset heatmaps and\n            embedding heatmaps.\n                - tl_heats (list[Tensor]): Top-left corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - br_heats (list[Tensor]): Bottom-right corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - tl_embs (list[Tensor] | list[None]): Top-left embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - br_embs (list[Tensor] | list[None]): Bottom-right embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - tl_offs (list[Tensor]): Top-left offset heatmaps for all\n                  levels, each is a 4D-tensor. The channels number is\n                  corner_offset_channels.\n                - br_offs (list[Tensor]): Bottom-right offset heatmaps for all\n                  levels, each is a 4D-tensor. The channels number is\n                  corner_offset_channels.\n        \"\"\"\n        lvl_ind = list(range(self.num_feat_levels))\n        return multi_apply(self.forward_single, feats, lvl_ind)\n\n    def forward_single(self,\n                       x: Tensor,\n                       lvl_ind: int,\n                       return_pool: bool = False) -> List[Tensor]:\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): Feature of a single level.\n            lvl_ind (int): Level index of current feature.\n            return_pool (bool): Return corner pool feature or not.\n                Defaults to False.\n\n        Returns:\n            tuple[Tensor]: A tuple of CornerHead's output for current feature\n            level. Containing the following Tensors:\n\n                - tl_heat (Tensor): Predicted top-left corner heatmap.\n                - br_heat (Tensor): Predicted bottom-right corner heatmap.\n                - tl_emb (Tensor | None): Predicted top-left embedding heatmap.\n                  None for `self.with_corner_emb == False`.\n                - br_emb (Tensor | None): Predicted bottom-right embedding\n                  heatmap. 
None for `self.with_corner_emb == False`.\n                - tl_off (Tensor): Predicted top-left offset heatmap.\n                - br_off (Tensor): Predicted bottom-right offset heatmap.\n                - tl_pool (Tensor): Top-left corner pool feature. Not must\n                  have.\n                - br_pool (Tensor): Bottom-right corner pool feature. Not must\n                  have.\n        \"\"\"\n        tl_pool = self.tl_pool[lvl_ind](x)\n        tl_heat = self.tl_heat[lvl_ind](tl_pool)\n        br_pool = self.br_pool[lvl_ind](x)\n        br_heat = self.br_heat[lvl_ind](br_pool)\n\n        tl_emb, br_emb = None, None\n        if self.with_corner_emb:\n            tl_emb = self.tl_emb[lvl_ind](tl_pool)\n            br_emb = self.br_emb[lvl_ind](br_pool)\n\n        tl_off = self.tl_off[lvl_ind](tl_pool)\n        br_off = self.br_off[lvl_ind](br_pool)\n\n        result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]\n        if return_pool:\n            result_list.append(tl_pool)\n            result_list.append(br_pool)\n\n        return result_list\n\n    def get_targets(self,\n                    gt_bboxes: List[Tensor],\n                    gt_labels: List[Tensor],\n                    feat_shape: Sequence[int],\n                    img_shape: Sequence[int],\n                    with_corner_emb: bool = False,\n                    with_guiding_shift: bool = False,\n                    with_centripetal_shift: bool = False) -> dict:\n        \"\"\"Generate corner targets.\n\n        Including corner heatmap, corner offset.\n\n        Optional: corner embedding, corner guiding shift, centripetal shift.\n\n        For CornerNet, we generate corner heatmap, corner offset and corner\n        embedding from this function.\n\n        For CentripetalNet, we generate corner heatmap, corner offset, guiding\n        shift and centripetal shift from this function.\n\n        Args:\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each\n                has shape (num_gt, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each box, each has\n                shape (num_gt, ).\n            feat_shape (Sequence[int]): Shape of output feature,\n                [batch, channel, height, width].\n            img_shape (Sequence[int]): Shape of input image,\n                [height, width, channel].\n            with_corner_emb (bool): Generate corner embedding target or not.\n                Defaults to False.\n            with_guiding_shift (bool): Generate guiding shift target or not.\n                Defaults to False.\n            with_centripetal_shift (bool): Generate centripetal shift target or\n                not. Defaults to False.\n\n        Returns:\n            dict: Ground truth of corner heatmap, corner offset, corner\n            embedding, guiding shift and centripetal shift. Containing the\n            following keys:\n\n                - topleft_heatmap (Tensor): Ground truth top-left corner\n                  heatmap.\n                - bottomright_heatmap (Tensor): Ground truth bottom-right\n                  corner heatmap.\n                - topleft_offset (Tensor): Ground truth top-left corner offset.\n                - bottomright_offset (Tensor): Ground truth bottom-right corner\n                  offset.\n                - corner_embedding (list[list[list[int]]]): Ground truth corner\n                  embedding. 
Not must have.\n                - topleft_guiding_shift (Tensor): Ground truth top-left corner\n                  guiding shift. Not must have.\n                - bottomright_guiding_shift (Tensor): Ground truth bottom-right\n                  corner guiding shift. Not must have.\n                - topleft_centripetal_shift (Tensor): Ground truth top-left\n                  corner centripetal shift. Not must have.\n                - bottomright_centripetal_shift (Tensor): Ground truth\n                  bottom-right corner centripetal shift. Not must have.\n        \"\"\"\n        batch_size, _, height, width = feat_shape\n        img_h, img_w = img_shape[:2]\n\n        width_ratio = float(width / img_w)\n        height_ratio = float(height / img_h)\n\n        gt_tl_heatmap = gt_bboxes[-1].new_zeros(\n            [batch_size, self.num_classes, height, width])\n        gt_br_heatmap = gt_bboxes[-1].new_zeros(\n            [batch_size, self.num_classes, height, width])\n        gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n        gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n\n        if with_corner_emb:\n            match = []\n\n        # Guiding shift is a kind of offset, from center to corner\n        if with_guiding_shift:\n            gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n            gt_br_guiding_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n        # Centripetal shift is also a kind of offset, from center to corner\n        # and normalized by log.\n        if with_centripetal_shift:\n            gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n            gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n\n        for batch_id in range(batch_size):\n            # Ground truth of corner embedding per image is a list of coord set\n            corner_match = []\n            for box_id in range(len(gt_labels[batch_id])):\n                left, top, right, bottom = gt_bboxes[batch_id][box_id]\n                center_x = (left + right) / 2.0\n                center_y = (top + bottom) / 2.0\n                label = gt_labels[batch_id][box_id]\n\n                # Use coords in the feature level to generate ground truth\n                scale_left = left * width_ratio\n                scale_right = right * width_ratio\n                scale_top = top * height_ratio\n                scale_bottom = bottom * height_ratio\n                scale_center_x = center_x * width_ratio\n                scale_center_y = center_y * height_ratio\n\n                # Int coords on feature map/ground truth tensor\n                left_idx = int(min(scale_left, width - 1))\n                right_idx = int(min(scale_right, width - 1))\n                top_idx = int(min(scale_top, height - 1))\n                bottom_idx = int(min(scale_bottom, height - 1))\n\n                # Generate gaussian heatmap\n                scale_box_width = ceil(scale_right - scale_left)\n                scale_box_height = ceil(scale_bottom - scale_top)\n                radius = gaussian_radius((scale_box_height, scale_box_width),\n                                         min_overlap=0.3)\n                radius = max(0, int(radius))\n                gt_tl_heatmap[batch_id, label] = gen_gaussian_target(\n                    gt_tl_heatmap[batch_id, label], [left_idx, top_idx],\n      
              radius)\n                gt_br_heatmap[batch_id, label] = gen_gaussian_target(\n                    gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],\n                    radius)\n\n                # Generate corner offset\n                left_offset = scale_left - left_idx\n                top_offset = scale_top - top_idx\n                right_offset = scale_right - right_idx\n                bottom_offset = scale_bottom - bottom_idx\n                gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset\n                gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset\n                gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset\n                gt_br_offset[batch_id, 1, bottom_idx,\n                             right_idx] = bottom_offset\n\n                # Generate corner embedding\n                if with_corner_emb:\n                    corner_match.append([[top_idx, left_idx],\n                                         [bottom_idx, right_idx]])\n                # Generate guiding shift\n                if with_guiding_shift:\n                    gt_tl_guiding_shift[batch_id, 0, top_idx,\n                                        left_idx] = scale_center_x - left_idx\n                    gt_tl_guiding_shift[batch_id, 1, top_idx,\n                                        left_idx] = scale_center_y - top_idx\n                    gt_br_guiding_shift[batch_id, 0, bottom_idx,\n                                        right_idx] = right_idx - scale_center_x\n                    gt_br_guiding_shift[\n                        batch_id, 1, bottom_idx,\n                        right_idx] = bottom_idx - scale_center_y\n                # Generate centripetal shift\n                if with_centripetal_shift:\n                    gt_tl_centripetal_shift[batch_id, 0, top_idx,\n                                            left_idx] = log(scale_center_x -\n                                                            scale_left)\n                    gt_tl_centripetal_shift[batch_id, 1, top_idx,\n                                            left_idx] = log(scale_center_y -\n                                                            scale_top)\n                    gt_br_centripetal_shift[batch_id, 0, bottom_idx,\n                                            right_idx] = log(scale_right -\n                                                             scale_center_x)\n                    gt_br_centripetal_shift[batch_id, 1, bottom_idx,\n                                            right_idx] = log(scale_bottom -\n                                                             scale_center_y)\n\n            if with_corner_emb:\n                match.append(corner_match)\n\n        target_result = dict(\n            topleft_heatmap=gt_tl_heatmap,\n            topleft_offset=gt_tl_offset,\n            bottomright_heatmap=gt_br_heatmap,\n            bottomright_offset=gt_br_offset)\n\n        if with_corner_emb:\n            target_result.update(corner_embedding=match)\n        if with_guiding_shift:\n            target_result.update(\n                topleft_guiding_shift=gt_tl_guiding_shift,\n                bottomright_guiding_shift=gt_br_guiding_shift)\n        if with_centripetal_shift:\n            target_result.update(\n                topleft_centripetal_shift=gt_tl_centripetal_shift,\n                bottomright_centripetal_shift=gt_br_centripetal_shift)\n\n        return target_result\n\n    def loss_by_feat(\n            self,\n            tl_heats: 
List[Tensor],\n            br_heats: List[Tensor],\n            tl_embs: List[Tensor],\n            br_embs: List[Tensor],\n            tl_offs: List[Tensor],\n            br_offs: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Specify which bounding boxes can be ignored when computing\n                the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components. 
Containing the\n            following losses:\n\n                - det_loss (list[Tensor]): Corner keypoint losses of all\n                  feature levels.\n                - pull_loss (list[Tensor]): Part one of AssociativeEmbedding\n                  losses of all feature levels.\n                - push_loss (list[Tensor]): Part two of AssociativeEmbedding\n                  losses of all feature levels.\n                - off_loss (list[Tensor]): Corner offset losses of all feature\n                  levels.\n        \"\"\"\n        gt_bboxes = [\n            gt_instances.bboxes for gt_instances in batch_gt_instances\n        ]\n        gt_labels = [\n            gt_instances.labels for gt_instances in batch_gt_instances\n        ]\n\n        targets = self.get_targets(\n            gt_bboxes,\n            gt_labels,\n            tl_heats[-1].shape,\n            batch_img_metas[0]['batch_input_shape'],\n            with_corner_emb=self.with_corner_emb)\n        mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n        det_losses, pull_losses, push_losses, off_losses = multi_apply(\n            self.loss_by_feat_single, tl_heats, br_heats, tl_embs, br_embs,\n            tl_offs, br_offs, mlvl_targets)\n        loss_dict = dict(det_loss=det_losses, off_loss=off_losses)\n        if self.with_corner_emb:\n            loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)\n        return loss_dict\n\n    def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor,\n                            tl_emb: Optional[Tensor], br_emb: Optional[Tensor],\n                            tl_off: Tensor, br_off: Tensor,\n                            targets: dict) -> Tuple[Tensor, ...]:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            tl_hmp (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_hmp (Tensor): Bottom-right corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            tl_emb (Tensor, optional): Top-left corner embedding for current\n                level with shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor, optional): Bottom-right corner embedding for\n                current level with shape (N, corner_emb_channels, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            targets (dict): Corner target generated by `get_targets`.\n\n        Returns:\n            tuple[torch.Tensor]: Losses of the head's different branches\n            containing the following losses:\n\n                - det_loss (Tensor): Corner keypoint loss.\n                - pull_loss (Tensor): Part one of AssociativeEmbedding loss.\n                - push_loss (Tensor): Part two of AssociativeEmbedding loss.\n                - off_loss (Tensor): Corner offset loss.\n        \"\"\"\n        gt_tl_hmp = targets['topleft_heatmap']\n        gt_br_hmp = targets['bottomright_heatmap']\n        gt_tl_off = targets['topleft_offset']\n        gt_br_off = targets['bottomright_offset']\n        gt_embedding = targets['corner_embedding']\n\n        # Detection loss\n        tl_det_loss = self.loss_heatmap(\n            tl_hmp.sigmoid(),\n            gt_tl_hmp,\n  
          avg_factor=max(1,\n                           gt_tl_hmp.eq(1).sum()))\n        br_det_loss = self.loss_heatmap(\n            br_hmp.sigmoid(),\n            gt_br_hmp,\n            avg_factor=max(1,\n                           gt_br_hmp.eq(1).sum()))\n        det_loss = (tl_det_loss + br_det_loss) / 2.0\n\n        # AssociativeEmbedding loss\n        if self.with_corner_emb and self.loss_embedding is not None:\n            pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,\n                                                       gt_embedding)\n        else:\n            pull_loss, push_loss = None, None\n\n        # Offset loss\n        # We only compute the offset loss at the real corner position.\n        # The value of real corner would be 1 in heatmap ground truth.\n        # The mask is computed in class agnostic mode and its shape is\n        # batch * 1 * width * height.\n        tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_tl_hmp)\n        br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_br_hmp)\n        tl_off_loss = self.loss_offset(\n            tl_off,\n            gt_tl_off,\n            tl_off_mask,\n            avg_factor=max(1, tl_off_mask.sum()))\n        br_off_loss = self.loss_offset(\n            br_off,\n            gt_br_off,\n            br_off_mask,\n            avg_factor=max(1, br_off_mask.sum()))\n\n        off_loss = (tl_off_loss + br_off_loss) / 2.0\n\n        return det_loss, pull_loss, push_loss, off_loss\n\n    def predict_by_feat(self,\n                        tl_heats: List[Tensor],\n                        br_heats: List[Tensor],\n                        tl_embs: List[Tensor],\n                        br_embs: List[Tensor],\n                        tl_offs: List[Tensor],\n                        br_offs: List[Tensor],\n                        batch_img_metas: Optional[List[dict]] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            batch_img_metas (list[dict], optional): Batch image meta info.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. 
Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(\n            batch_img_metas)\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            result_list.append(\n                self._predict_by_feat_single(\n                    tl_heats[-1][img_id:img_id + 1, :],\n                    br_heats[-1][img_id:img_id + 1, :],\n                    tl_offs[-1][img_id:img_id + 1, :],\n                    br_offs[-1][img_id:img_id + 1, :],\n                    batch_img_metas[img_id],\n                    tl_emb=tl_embs[-1][img_id:img_id + 1, :],\n                    br_emb=br_embs[-1][img_id:img_id + 1, :],\n                    rescale=rescale,\n                    with_nms=with_nms))\n\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                tl_heat: Tensor,\n                                br_heat: Tensor,\n                                tl_off: Tensor,\n                                br_off: Tensor,\n                                img_meta: dict,\n                                tl_emb: Optional[Tensor] = None,\n                                br_emb: Optional[Tensor] = None,\n                                tl_centripetal_shift: Optional[Tensor] = None,\n                                br_centripetal_shift: Optional[Tensor] = None,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            tl_heat (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_heat (Tensor): Bottom-right corner heatmap for current level\n                with shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            tl_emb (Tensor): Top-left corner embedding for current level with\n                shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor): Bottom-right corner embedding for current level\n                with shape (N, corner_emb_channels, H, W).\n            tl_centripetal_shift: Top-left corner's centripetal shift for\n                current level with shape (N, 2, H, W).\n            br_centripetal_shift: Bottom-right corner's centripetal shift for\n                current level with shape (N, 2, H, W).\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item 
usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        if isinstance(img_meta, (list, tuple)):\n            img_meta = img_meta[0]\n\n        batch_bboxes, batch_scores, batch_clses = self._decode_heatmap(\n            tl_heat=tl_heat.sigmoid(),\n            br_heat=br_heat.sigmoid(),\n            tl_off=tl_off,\n            br_off=br_off,\n            tl_emb=tl_emb,\n            br_emb=br_emb,\n            tl_centripetal_shift=tl_centripetal_shift,\n            br_centripetal_shift=br_centripetal_shift,\n            img_meta=img_meta,\n            k=self.test_cfg.corner_topk,\n            kernel=self.test_cfg.local_maximum_kernel,\n            distance_threshold=self.test_cfg.distance_threshold)\n\n        if rescale and 'scale_factor' in img_meta:\n            batch_bboxes /= batch_bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n\n        bboxes = batch_bboxes.view([-1, 4])\n        scores = batch_scores.view(-1)\n        clses = batch_clses.view(-1)\n\n        det_bboxes = torch.cat([bboxes, scores.unsqueeze(-1)], -1)\n        keepinds = (det_bboxes[:, -1] > -0.1)\n        det_bboxes = det_bboxes[keepinds]\n        det_labels = clses[keepinds]\n\n        if with_nms:\n            det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,\n                                                      self.test_cfg)\n\n        results = InstanceData()\n        results.bboxes = det_bboxes[..., :4]\n        results.scores = det_bboxes[..., 4]\n        results.labels = det_labels\n        return results\n\n    def _bboxes_nms(self, bboxes: Tensor, labels: Tensor,\n                    cfg: ConfigDict) -> Tuple[Tensor, Tensor]:\n        \"\"\"bboxes nms.\"\"\"\n        if 'nms_cfg' in cfg:\n            warning.warn('nms_cfg in test_cfg will be deprecated. 
'\n                         'Please rename it as nms')\n        if 'nms' not in cfg:\n            cfg.nms = cfg.nms_cfg\n\n        if labels.numel() > 0:\n            max_num = cfg.max_per_img\n            bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,\n                                                             -1].contiguous(),\n                                       labels, cfg.nms)\n            if max_num > 0:\n                bboxes = bboxes[:max_num]\n                labels = labels[keep][:max_num]\n\n        return bboxes, labels\n\n    def _decode_heatmap(self,\n                        tl_heat: Tensor,\n                        br_heat: Tensor,\n                        tl_off: Tensor,\n                        br_off: Tensor,\n                        tl_emb: Optional[Tensor] = None,\n                        br_emb: Optional[Tensor] = None,\n                        tl_centripetal_shift: Optional[Tensor] = None,\n                        br_centripetal_shift: Optional[Tensor] = None,\n                        img_meta: Optional[dict] = None,\n                        k: int = 100,\n                        kernel: int = 3,\n                        distance_threshold: float = 0.5,\n                        num_dets: int = 1000) -> Tuple[Tensor, Tensor, Tensor]:\n        \"\"\"Transform outputs into detections raw bbox prediction.\n\n        Args:\n            tl_heat (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_heat (Tensor): Bottom-right corner heatmap for current level\n                with shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            tl_emb (Tensor, Optional): Top-left corner embedding for current\n                level with shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor, Optional): Bottom-right corner embedding for\n                current level with shape (N, corner_emb_channels, H, W).\n            tl_centripetal_shift (Tensor, Optional): Top-left centripetal shift\n                for current level with shape (N, 2, H, W).\n            br_centripetal_shift (Tensor, Optional): Bottom-right centripetal\n                shift for current level with shape (N, 2, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            k (int): Get top k corner keypoints from heatmap.\n            kernel (int): Max pooling kernel for extract local maximum pixels.\n            distance_threshold (float): Distance threshold. 
Top-left and\n                bottom-right corner keypoints with feature distance less than\n                the threshold will be regarded as keypoints from same object.\n            num_dets (int): Num of raw boxes before doing nms.\n\n        Returns:\n            tuple[torch.Tensor]: Decoded output of CornerHead, containing the\n            following Tensors:\n\n            - bboxes (Tensor): Coords of each box.\n            - scores (Tensor): Scores of each box.\n            - clses (Tensor): Categories of each box.\n        \"\"\"\n        with_embedding = tl_emb is not None and br_emb is not None\n        with_centripetal_shift = (\n            tl_centripetal_shift is not None\n            and br_centripetal_shift is not None)\n        assert with_embedding + with_centripetal_shift == 1\n        batch, _, height, width = tl_heat.size()\n        if torch.onnx.is_in_onnx_export():\n            inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2]\n        else:\n            inp_h, inp_w = img_meta['batch_input_shape'][:2]\n\n        # perform nms on heatmaps\n        tl_heat = get_local_maximum(tl_heat, kernel=kernel)\n        br_heat = get_local_maximum(br_heat, kernel=kernel)\n\n        tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap(\n            tl_heat, k=k)\n        br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap(\n            br_heat, k=k)\n\n        # We use repeat instead of expand here because expand is a\n        # shallow-copy function. Thus it could cause unexpected testing result\n        # sometimes. Using expand will decrease about 10% mAP during testing\n        # compared to repeat.\n        tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)\n        tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)\n        br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)\n        br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)\n\n        tl_off = transpose_and_gather_feat(tl_off, tl_inds)\n        tl_off = tl_off.view(batch, k, 1, 2)\n        br_off = transpose_and_gather_feat(br_off, br_inds)\n        br_off = br_off.view(batch, 1, k, 2)\n\n        tl_xs = tl_xs + tl_off[..., 0]\n        tl_ys = tl_ys + tl_off[..., 1]\n        br_xs = br_xs + br_off[..., 0]\n        br_ys = br_ys + br_off[..., 1]\n\n        if with_centripetal_shift:\n            tl_centripetal_shift = transpose_and_gather_feat(\n                tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()\n            br_centripetal_shift = transpose_and_gather_feat(\n                br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()\n\n            tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]\n            tl_ctys = tl_ys + tl_centripetal_shift[..., 1]\n            br_ctxs = br_xs - br_centripetal_shift[..., 0]\n            br_ctys = br_ys - br_centripetal_shift[..., 1]\n\n        # all possible boxes based on top k corners (ignoring class)\n        tl_xs *= (inp_w / width)\n        tl_ys *= (inp_h / height)\n        br_xs *= (inp_w / width)\n        br_ys *= (inp_h / height)\n\n        if with_centripetal_shift:\n            tl_ctxs *= (inp_w / width)\n            tl_ctys *= (inp_h / height)\n            br_ctxs *= (inp_w / width)\n            br_ctys *= (inp_h / height)\n\n        x_off, y_off = 0, 0  # no crop\n        if not torch.onnx.is_in_onnx_export():\n            # since `RandomCenterCropPad` is done on CPU with numpy and it's\n            # not dynamic traceable when exporting to ONNX, thus 'border'\n            # does not appears as key in 'img_meta'. 
As a temporary solution,\n            # we move this 'border' handling to the post-processing step that\n            # runs after the model has been exported to ONNX; it is handled in\n            # `mmdet/core/export/model_wrappers.py`. Although this introduces a\n            # difference between the PyTorch and the exported ONNX model, it\n            # can be ignored since the two achieve comparable performance\n            # (e.g. 40.4 vs 40.6 on COCO val2017, for CornerNet without\n            # test-time flip)\n            if 'border' in img_meta:\n                x_off = img_meta['border'][2]\n                y_off = img_meta['border'][0]\n\n        tl_xs -= x_off\n        tl_ys -= y_off\n        br_xs -= x_off\n        br_ys -= y_off\n\n        zeros = tl_xs.new_zeros(*tl_xs.size())\n        tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros)\n        tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros)\n        br_xs = torch.where(br_xs > 0.0, br_xs, zeros)\n        br_ys = torch.where(br_ys > 0.0, br_ys, zeros)\n\n        bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)\n        area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()\n\n        if with_centripetal_shift:\n            tl_ctxs -= x_off\n            tl_ctys -= y_off\n            br_ctxs -= x_off\n            br_ctys -= y_off\n\n            tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)\n            tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)\n            br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)\n            br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)\n\n            ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),\n                                    dim=3)\n            area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()\n\n            rcentral = torch.zeros_like(ct_bboxes)\n            # magic nums from paper section 4.1\n            mu = torch.ones_like(area_bboxes) / 2.4\n            mu[area_bboxes > 3500] = 1 / 2.1  # large bboxes have a smaller mu\n\n            bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2\n            bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2\n            rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -\n                                                       bboxes[..., 0]) / 2\n            rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -\n                                                       bboxes[..., 1]) / 2\n            rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -\n                                                       bboxes[..., 0]) / 2\n            rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -\n                                                       bboxes[..., 1]) / 2\n            area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *\n                             (rcentral[..., 3] - rcentral[..., 1])).abs()\n            dists = area_ct_bboxes / area_rcentral\n\n            tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (\n                ct_bboxes[..., 0] >= rcentral[..., 2])\n            tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (\n                ct_bboxes[..., 1] >= rcentral[..., 3])\n            br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (\n                ct_bboxes[..., 2] >= rcentral[..., 2])\n            br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (\n                ct_bboxes[..., 3] >= rcentral[..., 3])\n\n        if with_embedding:\n            tl_emb = transpose_and_gather_feat(tl_emb, tl_inds)\n            tl_emb = tl_emb.view(batch, k, 1)\n            br_emb = 
transpose_and_gather_feat(br_emb, br_inds)\n            br_emb = br_emb.view(batch, 1, k)\n            dists = torch.abs(tl_emb - br_emb)\n\n        tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)\n        br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)\n\n        scores = (tl_scores + br_scores) / 2  # scores for all possible boxes\n\n        # tl and br should have same class\n        tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)\n        br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)\n        cls_inds = (tl_clses != br_clses)\n\n        # reject boxes based on distances\n        dist_inds = dists > distance_threshold\n\n        # reject boxes based on widths and heights\n        width_inds = (br_xs <= tl_xs)\n        height_inds = (br_ys <= tl_ys)\n\n        # No use `scores[cls_inds]`, instead we use `torch.where` here.\n        # Since only 1-D indices with type 'tensor(bool)' are supported\n        # when exporting to ONNX, any other bool indices with more dimensions\n        # (e.g. 2-D bool tensor) as input parameter in node is invalid\n        negative_scores = -1 * torch.ones_like(scores)\n        scores = torch.where(cls_inds, negative_scores, scores)\n        scores = torch.where(width_inds, negative_scores, scores)\n        scores = torch.where(height_inds, negative_scores, scores)\n        scores = torch.where(dist_inds, negative_scores, scores)\n\n        if with_centripetal_shift:\n            scores[tl_ctx_inds] = -1\n            scores[tl_cty_inds] = -1\n            scores[br_ctx_inds] = -1\n            scores[br_cty_inds] = -1\n\n        scores = scores.view(batch, -1)\n        scores, inds = torch.topk(scores, num_dets)\n        scores = scores.unsqueeze(2)\n\n        bboxes = bboxes.view(batch, -1, 4)\n        bboxes = gather_feat(bboxes, inds)\n\n        clses = tl_clses.contiguous().view(batch, -1, 1)\n        clses = gather_feat(clses, inds)\n\n        return bboxes, scores, clses\n"
  },
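  {
    "path": "examples/sketches/corner_pairing_sketch.py",
    "content": "# NOTE: Illustrative sketch only; this file is not part of the upstream\n# mmdetection package. It re-implements, in plain PyTorch, the corner-pairing\n# idea used by ``CornerHead._decode_heatmap``: take the top-k top-left and\n# bottom-right corner keypoints from their heatmaps, form all k x k candidate\n# boxes, and reject pairs whose classes differ, whose geometry is inverted,\n# or whose associative-embedding distance is too large. All names, shapes and\n# thresholds below are assumptions chosen for readability; the real head also\n# applies corner offsets, centripetal shifts and feature-map-to-input\n# rescaling before NMS.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\n\ndef topk_corners(heat: Tensor, k: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n    # Return (scores, classes, ys, xs) of the top-k peaks of a (C, H, W) heatmap.\n    num_classes, height, width = heat.shape\n    scores, inds = heat.view(-1).topk(k)\n    clses = torch.div(inds, height * width, rounding_mode='floor')\n    ys = torch.div(inds % (height * width), width, rounding_mode='floor')\n    xs = inds % width\n    return scores, clses, ys, xs\n\n\ndef pair_corners(tl_heat: Tensor,\n                 br_heat: Tensor,\n                 tl_emb: Tensor,\n                 br_emb: Tensor,\n                 k: int = 10,\n                 distance_threshold: float = 0.5) -> Tuple[Tensor, Tensor]:\n    # Pair top-left and bottom-right corners into (k * k, 4) boxes and scores.\n    tl_scores, tl_clses, tl_ys, tl_xs = topk_corners(tl_heat, k)\n    br_scores, br_clses, br_ys, br_xs = topk_corners(br_heat, k)\n\n    # Broadcast to all k x k combinations of (top-left, bottom-right) corners.\n    boxes = torch.stack(\n        (tl_xs[:, None].expand(k, k).float(),\n         tl_ys[:, None].expand(k, k).float(),\n         br_xs[None, :].expand(k, k).float(),\n         br_ys[None, :].expand(k, k).float()),\n        dim=-1)\n    scores = (tl_scores[:, None] + br_scores[None, :]) / 2\n\n    # Rejection rules mirroring the head: class mismatch, inverted geometry\n    # and a large embedding distance all receive a sentinel score of -1.\n    emb_dist = (tl_emb[tl_ys, tl_xs][:, None] -\n                br_emb[br_ys, br_xs][None, :]).abs()\n    reject = ((tl_clses[:, None] != br_clses[None, :])\n              | (boxes[..., 2] <= boxes[..., 0])\n              | (boxes[..., 3] <= boxes[..., 1])\n              | (emb_dist > distance_threshold))\n    scores = torch.where(reject, torch.full_like(scores, -1.), scores)\n    return boxes.view(-1, 4), scores.view(-1)\n\n\nif __name__ == '__main__':\n    torch.manual_seed(0)\n    tl = torch.rand(80, 32, 32)  # hypothetical (num_classes, H, W) heatmap\n    br = torch.rand(80, 32, 32)\n    tl_e = torch.rand(32, 32)  # 1-d embedding per corner location\n    br_e = torch.rand(32, 32)\n    boxes, scores = pair_corners(tl, br, tl_e, br_e)\n    print(boxes.shape, scores.shape)  # torch.Size([100, 4]) torch.Size([100])\n"
  },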
  {
    "path": "mmdet/models/dense_heads/dab_detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import Linear\nfrom mmengine.model import bias_init_with_prob, constant_init\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList\nfrom ..layers import MLP, inverse_sigmoid\nfrom .conditional_detr_head import ConditionalDETRHead\n\n\n@MODELS.register_module()\nclass DABDETRHead(ConditionalDETRHead):\n    \"\"\"Head of DAB-DETR. DAB-DETR: Dynamic Anchor Boxes are Better Queries for\n    DETR.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2201.12329>`_ .\n    \"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the transformer head.\"\"\"\n        # cls branch\n        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n        # reg branch\n        self.fc_reg = MLP(self.embed_dims, self.embed_dims, 4, 3)\n\n    def init_weights(self) -> None:\n        \"\"\"initialize weights.\"\"\"\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            nn.init.constant_(self.fc_cls.bias, bias_init)\n        constant_init(self.fc_reg.layers[-1], 0., bias=0.)\n\n    def forward(self, hidden_states: Tensor,\n                references: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"\"Forward function.\n\n        Args:\n            hidden_states (Tensor): Features from transformer decoder. If\n                `return_intermediate_dec` is True output has shape\n                (num_decoder_layers, bs, num_queries, dim), else has shape (1,\n                bs, num_queries, dim) which only contains the last layer\n                outputs.\n            references (Tensor): References from transformer decoder. If\n                `return_intermediate_dec` is True output has shape\n                (num_decoder_layers, bs, num_queries, 2/4), else has shape (1,\n                bs, num_queries, 2/4)\n                which only contains the last layer reference.\n        Returns:\n            tuple[Tensor]: results of head containing the following tensor.\n\n            - layers_cls_scores (Tensor): Outputs from the classification head,\n              shape (num_decoder_layers, bs, num_queries, cls_out_channels).\n              Note cls_out_channels should include background.\n            - layers_bbox_preds (Tensor): Sigmoid outputs from the regression\n              head with normalized coordinate format (cx, cy, w, h), has shape\n              (num_decoder_layers, bs, num_queries, 4).\n        \"\"\"\n        layers_cls_scores = self.fc_cls(hidden_states)\n        references_before_sigmoid = inverse_sigmoid(references, eps=1e-3)\n        tmp_reg_preds = self.fc_reg(hidden_states)\n        tmp_reg_preds[..., :references_before_sigmoid.\n                      size(-1)] += references_before_sigmoid\n        layers_bbox_preds = tmp_reg_preds.sigmoid()\n        return layers_cls_scores, layers_bbox_preds\n\n    def predict(self,\n                hidden_states: Tensor,\n                references: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network. 
Over-write\n        because img_metas are needed as inputs for bbox_head.\n\n        Args:\n            hidden_states (Tensor): Feature from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, dim).\n            references (Tensor): references from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, 2/4).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        last_layer_hidden_state = hidden_states[-1].unsqueeze(0)\n        last_layer_reference = references[-1].unsqueeze(0)\n        outs = self(last_layer_hidden_state, last_layer_reference)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n        return predictions\n"
  },
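  {
    "path": "examples/sketches/dab_detr_head_forward_sketch.py",
    "content": "# NOTE: Illustrative sketch only; this file is not part of the upstream\n# mmdetection package. It walks through the tensor flow of\n# ``DABDETRHead.forward`` with plain PyTorch: a Linear classification branch\n# and a small MLP regression branch are applied to the decoder hidden states,\n# the regression output is added to the inverse-sigmoid of the reference\n# points (their first 2 or 4 dims), and a final sigmoid yields normalized\n# (cx, cy, w, h) boxes. ``SimpleMLP`` and ``inverse_sigmoid`` below are local\n# stand-ins for mmdet's ``MLP`` and ``inverse_sigmoid``; all sizes are\n# arbitrary assumptions.\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\n\ndef inverse_sigmoid(x: Tensor, eps: float = 1e-3) -> Tensor:\n    # Numerically clamped logit, i.e. the inverse of sigmoid.\n    x = x.clamp(min=eps, max=1 - eps)\n    return torch.log(x / (1 - x))\n\n\nclass SimpleMLP(nn.Module):\n    # A small MLP with ReLU activations, mapping embed_dims -> 4 box params.\n\n    def __init__(self, embed_dims: int, out_dims: int = 4, num_layers: int = 3):\n        super().__init__()\n        layers = []\n        for i in range(num_layers):\n            out_c = out_dims if i == num_layers - 1 else embed_dims\n            layers.append(nn.Linear(embed_dims, out_c))\n            if i < num_layers - 1:\n                layers.append(nn.ReLU(inplace=True))\n        self.net = nn.Sequential(*layers)\n\n    def forward(self, x: Tensor) -> Tensor:\n        return self.net(x)\n\n\nif __name__ == '__main__':\n    num_layers, bs, num_queries, embed_dims, num_classes = 6, 2, 300, 256, 80\n    hidden_states = torch.rand(num_layers, bs, num_queries, embed_dims)\n    # Dynamic anchor boxes from the decoder, already normalized to (0, 1).\n    references = torch.rand(num_layers, bs, num_queries, 4)\n\n    fc_cls = nn.Linear(embed_dims, num_classes)\n    fc_reg = SimpleMLP(embed_dims)\n\n    layers_cls_scores = fc_cls(hidden_states)  # (6, 2, 300, 80)\n    reg_preds = fc_reg(hidden_states)  # (6, 2, 300, 4)\n    # The regression branch predicts a refinement in logit space on top of\n    # the inverse-sigmoid reference boxes, mirroring the head above.\n    reg_preds[..., :references.size(-1)] += inverse_sigmoid(references)\n    layers_bbox_preds = reg_preds.sigmoid()  # normalized (cx, cy, w, h)\n    print(layers_cls_scores.shape, layers_bbox_preds.shape)\n"
  },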
  {
    "path": "mmdet/models/dense_heads/ddod_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmengine.model import bias_init_with_prob, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..utils import images_to_levels, multi_apply, unmap\nfrom .anchor_head import AnchorHead\n\nEPS = 1e-12\n\n\n@MODELS.register_module()\nclass DDODHead(AnchorHead):\n    \"\"\"Detection Head of `DDOD <https://arxiv.org/abs/2107.02963>`_.\n\n    DDOD head decomposes conjunctions lying in most current one-stage\n    detectors via label assignment disentanglement, spatial feature\n    disentanglement, and pyramid supervision disentanglement.\n\n    Args:\n        num_classes (int): Number of categories excluding the\n            background category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): The number of stacked Conv. Defaults to 4.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        use_dcn (bool): Use dcn, Same as ATSS when False. Defaults to True.\n        norm_cfg (:obj:`ConfigDict` or dict): Normal config of ddod head.\n            Defaults to dict(type='GN', num_groups=32, requires_grad=True).\n        loss_iou (:obj:`ConfigDict` or dict): Config of IoU loss. Defaults to\n            dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0).\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 use_dcn: bool = True,\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 loss_iou: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 **kwargs) -> None:\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.use_dcn = use_dcn\n        super().__init__(num_classes, in_channels, **kwargs)\n\n        if self.train_cfg:\n            self.cls_assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            self.reg_assigner = TASK_UTILS.build(\n                self.train_cfg['reg_assigner'])\n        self.loss_iou = MODELS.build(loss_iou)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=dict(type='DCN', deform_groups=1)\n                    if i == 0 and self.use_dcn else self.conv_cfg,\n           
         norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=dict(type='DCN', deform_groups=1)\n                    if i == 0 and self.use_dcn else self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.atss_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.atss_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n        self.atss_iou = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n        # we use the global list in loss\n        self.cls_num_pos_samples_per_level = [\n            0. for _ in range(len(self.prior_generator.strides))\n        ]\n        self.reg_num_pos_samples_per_level = [\n            0. for _ in range(len(self.prior_generator.strides))\n        ]\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        for m in self.cls_convs:\n            normal_init(m.conv, std=0.01)\n        for m in self.reg_convs:\n            normal_init(m.conv, std=0.01)\n        normal_init(self.atss_reg, std=0.01)\n        normal_init(self.atss_iou, std=0.01)\n        bias_cls = bias_init_with_prob(0.01)\n        normal_init(self.atss_cls, std=0.01, bias=bias_cls)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores, bbox predictions,\n            and iou predictions.\n\n            - cls_scores (list[Tensor]): Classification scores for all \\\n            scale levels, each is a 4D-tensor, the channels number is \\\n            num_base_priors * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for all \\\n            scale levels, each is a 4D-tensor, the channels number is \\\n            num_base_priors * 4.\n            - iou_preds (list[Tensor]): IoU scores for all scale levels, \\\n            each is a 4D-tensor, the channels number is num_base_priors * 1.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales)\n\n    def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n\n            - cls_score (Tensor): Cls scores for a single scale level \\\n            the channels number is num_base_priors * num_classes.\n            - bbox_pred (Tensor): Box energies / deltas for a single \\\n            scale level, the channels number is num_base_priors * 4.\n            - iou_pred (Tensor): Iou for a single scale level, the \\\n            channel number is (N, num_base_priors * 1, H, W).\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            
cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.atss_cls(cls_feat)\n        # we just follow atss, not apply exp in bbox_pred\n        bbox_pred = scale(self.atss_reg(reg_feat)).float()\n        iou_pred = self.atss_iou(reg_feat)\n        return cls_score, bbox_pred, iou_pred\n\n    def loss_cls_by_feat_single(self, cls_score: Tensor, labels: Tensor,\n                                label_weights: Tensor,\n                                reweight_factor: List[float],\n                                avg_factor: float) -> Tuple[Tensor]:\n        \"\"\"Compute cls loss of a single scale level.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_base_priors * num_classes, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            reweight_factor (List[float]): Reweight factor for cls and reg\n                loss.\n            avg_factor (float): Average factor that is used to average\n                the loss. When using sampling method, avg_factor is usually\n                the sum of positive and negative priors. When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            Tuple[Tensor]: A tuple of loss components.\n        \"\"\"\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=avg_factor)\n        return reweight_factor * loss_cls,\n\n    def loss_reg_by_feat_single(self, anchors: Tensor, bbox_pred: Tensor,\n                                iou_pred: Tensor, labels,\n                                label_weights: Tensor, bbox_targets: Tensor,\n                                bbox_weights: Tensor,\n                                reweight_factor: List[float],\n                                avg_factor: float) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute reg loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_base_priors * 4, H, W).\n            iou_pred (Tensor): Iou for a single scale level, the\n                channel number is (N, num_base_priors * 1, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            bbox_weights (Tensor): BBox weights of all anchors in the\n                image with shape (N, 4)\n            reweight_factor (List[float]): Reweight factor for cls and reg\n                loss.\n            avg_factor (float): Average factor that is used to average\n                the loss. 
When using sampling method, avg_factor is usually\n                the sum of positive and negative priors. When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n        Returns:\n            Tuple[Tensor, Tensor]: A tuple of loss components.\n        \"\"\"\n        anchors = anchors.reshape(-1, 4)\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, )\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        bbox_weights = bbox_weights.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        iou_targets = label_weights.new_zeros(labels.shape)\n        iou_weights = label_weights.new_zeros(labels.shape)\n        iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero(\n            as_tuple=False)] = 1.\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    &\n                    (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_pred)\n            pos_decode_bbox_targets = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_targets)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                avg_factor=avg_factor)\n\n            iou_targets[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            loss_iou = self.loss_iou(\n                iou_pred, iou_targets, iou_weights, avg_factor=avg_factor)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            loss_iou = iou_pred.sum() * 0\n\n        return reweight_factor * loss_bbox, reweight_factor * loss_iou\n\n    def calc_reweight_factor(self, labels_list: List[Tensor]) -> List[float]:\n        \"\"\"Compute reweight_factor for regression and classification loss.\"\"\"\n        # get pos samples for each level\n        bg_class_ind = self.num_classes\n        for ii, each_level_label in enumerate(labels_list):\n            pos_inds = ((each_level_label >= 0) &\n                        (each_level_label < bg_class_ind)).nonzero(\n                            as_tuple=False).squeeze(1)\n            self.cls_num_pos_samples_per_level[ii] += len(pos_inds)\n        # get the reweight factor in the range [1, 2] by linear interpolation\n        min_pos_samples = min(self.cls_num_pos_samples_per_level)\n        max_pos_samples = max(self.cls_num_pos_samples_per_level)\n        interval = 1. / (max_pos_samples - min_pos_samples + 1e-10)\n        reweight_factor_per_level = []\n        for pos_samples in self.cls_num_pos_samples_per_level:\n            factor = 2. 
- (pos_samples - min_pos_samples) * interval\n            reweight_factor_per_level.append(factor)\n        return reweight_factor_per_level\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            iou_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_base_priors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_base_priors * 4, H, W)\n            iou_preds (list[Tensor]): Score factor for all scale level,\n                each is a 4D-tensor, has shape (batch_size, 1, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        # calculate common vars for cls and reg assigners at once\n        targets_com = self.process_predictions_and_anchors(\n            anchor_list, valid_flag_list, cls_scores, bbox_preds,\n            batch_img_metas, batch_gt_instances_ignore)\n        (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list,\n         bbox_pred_list, batch_gt_instances_ignore) = targets_com\n\n        # classification branch assigner\n        cls_targets = self.get_cls_targets(\n            anchor_list,\n            valid_flag_list,\n            num_level_anchors_list,\n            cls_score_list,\n            bbox_pred_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n        avg_factor = max(avg_factor, 1.0)\n\n        reweight_factor_per_level = self.calc_reweight_factor(labels_list)\n\n        cls_losses_cls, = multi_apply(\n            self.loss_cls_by_feat_single,\n            cls_scores,\n            labels_list,\n            label_weights_list,\n            reweight_factor_per_level,\n            avg_factor=avg_factor)\n\n        # regression branch assigner\n        reg_targets = self.get_reg_targets(\n            anchor_list,\n            valid_flag_list,\n            num_level_anchors_list,\n            
cls_score_list,\n            bbox_pred_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = reg_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n        avg_factor = max(avg_factor, 1.0)\n\n        reweight_factor_per_level = self.calc_reweight_factor(labels_list)\n\n        reg_losses_bbox, reg_losses_iou = multi_apply(\n            self.loss_reg_by_feat_single,\n            reg_anchor_list,\n            bbox_preds,\n            iou_preds,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            reweight_factor_per_level,\n            avg_factor=avg_factor)\n\n        return dict(\n            loss_cls=cls_losses_cls,\n            loss_bbox=reg_losses_bbox,\n            loss_iou=reg_losses_iou)\n\n    def process_predictions_and_anchors(\n            self,\n            anchor_list: List[List[Tensor]],\n            valid_flag_list: List[List[Tensor]],\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:\n        \"\"\"Compute common vars for regression and classification targets.\n\n        Args:\n            anchor_list (List[List[Tensor]]): anchors of each image.\n            valid_flag_list (List[List[Tensor]]): Valid flags of each image.\n            cls_scores (List[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Return:\n            tuple[Tensor]: A tuple of common loss vars.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        anchor_list_ = []\n        valid_flag_list_ = []\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list_.append(torch.cat(anchor_list[i]))\n            valid_flag_list_.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None for _ in range(num_imgs)]\n\n        num_levels = len(cls_scores)\n        cls_score_list = []\n        bbox_pred_list = []\n\n        mlvl_cls_score_list = [\n            cls_score.permute(0, 2, 3, 1).reshape(\n                num_imgs, -1, self.num_base_priors * self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        mlvl_bbox_pred_list = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.num_base_priors * 4)\n            for bbox_pred in bbox_preds\n        ]\n\n        for i in range(num_imgs):\n            mlvl_cls_tensor_list = [\n                mlvl_cls_score_list[j][i] for j in range(num_levels)\n            ]\n            mlvl_bbox_tensor_list = [\n                mlvl_bbox_pred_list[j][i] for j in range(num_levels)\n            ]\n            cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0)\n            cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0)\n            cls_score_list.append(cat_mlvl_cls_score)\n            bbox_pred_list.append(cat_mlvl_bbox_pred)\n        return (anchor_list_, valid_flag_list_, num_level_anchors_list,\n                cls_score_list, bbox_pred_list, batch_gt_instances_ignore)\n\n    def get_cls_targets(self,\n                        anchor_list: List[Tensor],\n                        valid_flag_list: List[Tensor],\n                        num_level_anchors_list: List[int],\n                        cls_score_list: List[Tensor],\n                        bbox_pred_list: List[Tensor],\n                        batch_gt_instances: InstanceList,\n                        batch_img_metas: List[dict],\n                        batch_gt_instances_ignore: OptInstanceList = None,\n                        unmap_outputs: bool = True) -> tuple:\n        \"\"\"Get cls targets for DDOD head.\n\n        This method is almost the same as `AnchorHead.get_targets()`.\n        Besides returning the targets as the parent  method does,\n        it also returns the anchors as the first element of the\n        returned tuple.\n\n        Args:\n            anchor_list (list[Tensor]): anchors of each image.\n            valid_flag_list (list[Tensor]): Valid flags of each image.\n            num_level_anchors_list (list[Tensor]): Number of anchors of each\n                scale level of all image.\n            cls_score_list (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                
num_base_priors * num_classes.\n            bbox_pred_list (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Return:\n            tuple[Tensor]: A tuple of cls targets components.\n        \"\"\"\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             anchor_list,\n             valid_flag_list,\n             cls_score_list,\n             bbox_pred_list,\n             num_level_anchors_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs,\n             is_cls_assigner=True)\n        # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n        # When using sampling method, avg_factor is usually the sum of\n        # positive and negative priors. When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0])\n        labels_list = images_to_levels(all_labels, num_level_anchors_list[0])\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors_list[0])\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors_list[0])\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors_list[0])\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, avg_factor)\n\n    def get_reg_targets(self,\n                        anchor_list: List[Tensor],\n                        valid_flag_list: List[Tensor],\n                        num_level_anchors_list: List[int],\n                        cls_score_list: List[Tensor],\n                        bbox_pred_list: List[Tensor],\n                        batch_gt_instances: InstanceList,\n                        batch_img_metas: List[dict],\n                        batch_gt_instances_ignore: OptInstanceList = None,\n                        unmap_outputs: bool = True) -> tuple:\n        \"\"\"Get reg targets for DDOD head.\n\n        This method is almost the same as `AnchorHead.get_targets()` when\n        is_cls_assigner is False. 
Besides returning the targets as the parent\n        method does, it also returns the anchors as the first element of the\n        returned tuple.\n\n        Args:\n            anchor_list (list[Tensor]): anchors of each image.\n            valid_flag_list (list[Tensor]): Valid flags of each image.\n            num_level_anchors_list (list[Tensor]): Number of anchors of each\n                scale level of all image.\n            cls_score_list (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * num_classes.\n            bbox_pred_list (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Return:\n            tuple[Tensor]: A tuple of reg targets components.\n        \"\"\"\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             anchor_list,\n             valid_flag_list,\n             cls_score_list,\n             bbox_pred_list,\n             num_level_anchors_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs,\n             is_cls_assigner=False)\n        # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n        # When using sampling method, avg_factor is usually the sum of\n        # positive and negative priors. When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0])\n        labels_list = images_to_levels(all_labels, num_level_anchors_list[0])\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors_list[0])\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors_list[0])\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors_list[0])\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, avg_factor)\n\n    def _get_targets_single(self,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            cls_scores: Tensor,\n                            bbox_preds: Tensor,\n                            num_level_anchors: List[int],\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True,\n                            is_cls_assigner: bool = True) -> tuple:\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image,\n                which are concatenated into a single tensor of shape\n                (num_base_priors, 4).\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                shape (num_base_priors,).\n            cls_scores (Tensor): Classification scores for all scale\n                levels of the image.\n            bbox_preds (Tensor): Box energies / deltas for all scale\n                levels of the image.\n            num_level_anchors (List[int]): Number of anchors of each\n                scale level.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. 
Defaults to True.\n            is_cls_assigner (bool): Classification or regression.\n                Defaults to True.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n            - anchors (Tensor): all anchors in the image with shape (N, 4).\n            - labels (Tensor): Labels of all anchors in the image with \\\n            shape (N, ).\n            - label_weights (Tensor): Label weights of all anchor in the \\\n            image with shape (N, ).\n            - bbox_targets (Tensor): BBox targets of all anchors in the \\\n            image with shape (N, 4).\n            - bbox_weights (Tensor): BBox weights of all anchors in the \\\n            image with shape (N, 4)\n            - pos_inds (Tensor): Indices of positive anchor with shape \\\n            (num_pos, ).\n            - neg_inds (Tensor): Indices of negative anchor with shape \\\n            (num_neg, ).\n            - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            num_level_anchors, inside_flags)\n        bbox_preds_valid = bbox_preds[inside_flags, :]\n        cls_scores_valid = cls_scores[inside_flags, :]\n\n        assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner\n\n        # decode prediction out of assigner\n        bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid)\n        pred_instances = InstanceData(\n            priors=anchors, bboxes=bbox_preds_valid, scores=cls_scores_valid)\n\n        assign_result = assigner.assign(\n            pred_instances=pred_instances,\n            num_level_priors=num_level_anchors_inside,\n            gt_instances=gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n        sampling_result = self.sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            pos_bbox_targets = self.bbox_coder.encode(\n                sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = 
self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds, sampling_result)\n\n    def get_num_level_anchors_inside(self, num_level_anchors: List[int],\n                                     inside_flags: Tensor) -> List[int]:\n        \"\"\"Get the anchors of each scale level inside.\n\n        Args:\n            num_level_anchors (list[int]): Number of anchors of each\n                scale level.\n            inside_flags (Tensor): Multi level inside flags of the image,\n                which are concatenated into a single tensor of\n                shape (num_base_priors,).\n\n        Returns:\n            list[int]: Number of anchors of each scale level inside.\n        \"\"\"\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
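The per-image target computation in the DDOD head above first filters the concatenated anchors by `inside_flags` and then recovers per-level counts with `get_num_level_anchors_inside`, which the cost-aware assigners need in order to pick candidates per FPN level. A minimal, self-contained sketch of that splitting step (toy anchor counts and flags, independent of mmdet) might look like:

import torch

def num_level_anchors_inside(num_level_anchors, inside_flags):
    # Split the flat per-anchor validity flags back into per-level chunks
    # and count how many anchors survive the image-boundary filtering
    # at each scale level.
    split_flags = torch.split(inside_flags, num_level_anchors)
    return [int(flags.sum()) for flags in split_flags]

# Toy example: three FPN levels with 6, 4 and 2 anchors respectively.
num_level_anchors = [6, 4, 2]
inside_flags = torch.tensor(
    [1, 1, 0, 1, 1, 1,            # level 0: 5 anchors inside
     0, 0, 1, 1,                  # level 1: 2 anchors inside
     1, 0], dtype=torch.bool)     # level 2: 1 anchor inside

print(num_level_anchors_inside(num_level_anchors, inside_flags))
# -> [5, 2, 1]

Using `torch.split` with a list of section sizes keeps the level boundaries intact, so the returned counts line up one-to-one with the per-level anchor lists produced elsewhere in the head.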
  {
    "path": "mmdet/models/dense_heads/deformable_detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import Dict, List, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Linear\nfrom mmengine.model import bias_init_with_prob, constant_init\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList, OptInstanceList\nfrom ..layers import inverse_sigmoid\nfrom .detr_head import DETRHead\n\n\n@MODELS.register_module()\nclass DeformableDETRHead(DETRHead):\n    r\"\"\"Head of DeformDETR: Deformable DETR: Deformable Transformers for\n    End-to-End Object Detection.\n\n    Code is modified from the `official github repo\n    <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2010.04159>`_ .\n\n    Args:\n        share_pred_layer (bool): Whether to share parameters for all the\n            prediction layers. Defaults to `False`.\n        num_pred_layer (int): The number of the prediction layers.\n            Defaults to 6.\n        as_two_stage (bool, optional): Whether to generate the proposal\n            from the outputs of encoder. Defaults to `False`.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 share_pred_layer: bool = False,\n                 num_pred_layer: int = 6,\n                 as_two_stage: bool = False,\n                 **kwargs) -> None:\n        self.share_pred_layer = share_pred_layer\n        self.num_pred_layer = num_pred_layer\n        self.as_two_stage = as_two_stage\n\n        super().__init__(*args, **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize classification branch and regression branch of head.\"\"\"\n        fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n        reg_branch = []\n        for _ in range(self.num_reg_fcs):\n            reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n            reg_branch.append(nn.ReLU())\n        reg_branch.append(Linear(self.embed_dims, 4))\n        reg_branch = nn.Sequential(*reg_branch)\n\n        if self.share_pred_layer:\n            self.cls_branches = nn.ModuleList(\n                [fc_cls for _ in range(self.num_pred_layer)])\n            self.reg_branches = nn.ModuleList(\n                [reg_branch for _ in range(self.num_pred_layer)])\n        else:\n            self.cls_branches = nn.ModuleList(\n                [copy.deepcopy(fc_cls) for _ in range(self.num_pred_layer)])\n            self.reg_branches = nn.ModuleList([\n                copy.deepcopy(reg_branch) for _ in range(self.num_pred_layer)\n            ])\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the Deformable DETR head.\"\"\"\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            for m in self.cls_branches:\n                nn.init.constant_(m.bias, bias_init)\n        for m in self.reg_branches:\n            constant_init(m[-1], 0, bias=0)\n        nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)\n        if self.as_two_stage:\n            for m in self.reg_branches:\n                nn.init.constant_(m[-1].bias.data[2:], 0.0)\n\n    def forward(self, hidden_states: Tensor,\n                references: List[Tensor]) -> Tuple[Tensor]:\n        \"\"\"Forward function.\n\n        Args:\n            hidden_states (Tensor): Hidden states output from each decoder\n                layer, has shape (num_decoder_layers, 
bs, num_queries, dim).\n            references (list[Tensor]): List of the reference from the decoder.\n                The first reference is the `init_reference` (initial) and the\n                other num_decoder_layers(6) references are `inter_references`\n                (intermediate). The `init_reference` has shape (bs,\n                num_queries, 4) when `as_two_stage` of the detector is `True`,\n                otherwise (bs, num_queries, 2). Each `inter_reference` has\n                shape (bs, num_queries, 4) when `with_box_refine` of the\n                detector is `True`, otherwise (bs, num_queries, 2). The\n                coordinates are arranged as (cx, cy) when the last dimension is\n                2, and (cx, cy, w, h) when it is 4.\n\n        Returns:\n            tuple[Tensor]: results of head containing the following tensor.\n\n            - all_layers_outputs_classes (Tensor): Outputs from the\n              classification head, has shape (num_decoder_layers, bs,\n              num_queries, cls_out_channels).\n            - all_layers_outputs_coords (Tensor): Sigmoid outputs from the\n              regression head with normalized coordinate format (cx, cy, w,\n              h), has shape (num_decoder_layers, bs, num_queries, 4) with the\n              last dimension arranged as (cx, cy, w, h).\n        \"\"\"\n        all_layers_outputs_classes = []\n        all_layers_outputs_coords = []\n\n        for layer_id in range(hidden_states.shape[0]):\n            reference = inverse_sigmoid(references[layer_id])\n            # NOTE The last reference will not be used.\n            hidden_state = hidden_states[layer_id]\n            outputs_class = self.cls_branches[layer_id](hidden_state)\n            tmp_reg_preds = self.reg_branches[layer_id](hidden_state)\n            if reference.shape[-1] == 4:\n                # When `layer` is 0 and `as_two_stage` of the detector\n                # is `True`, or when `layer` is greater than 0 and\n                # `with_box_refine` of the detector is `True`.\n                tmp_reg_preds += reference\n            else:\n                # When `layer` is 0 and `as_two_stage` of the detector\n                # is `False`, or when `layer` is greater than 0 and\n                # `with_box_refine` of the detector is `False`.\n                assert reference.shape[-1] == 2\n                tmp_reg_preds[..., :2] += reference\n            outputs_coord = tmp_reg_preds.sigmoid()\n            all_layers_outputs_classes.append(outputs_class)\n            all_layers_outputs_coords.append(outputs_coord)\n\n        all_layers_outputs_classes = torch.stack(all_layers_outputs_classes)\n        all_layers_outputs_coords = torch.stack(all_layers_outputs_coords)\n\n        return all_layers_outputs_classes, all_layers_outputs_coords\n\n    def loss(self, hidden_states: Tensor, references: List[Tensor],\n             enc_outputs_class: Tensor, enc_outputs_coord: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the queries of the upstream network.\n\n        Args:\n            hidden_states (Tensor): Hidden states output from each decoder\n                layer, has shape (num_decoder_layers, num_queries, bs, dim).\n            references (list[Tensor]): List of the reference from the decoder.\n                The first reference is the `init_reference` (initial) and the\n                other num_decoder_layers(6) references are 
`inter_references`\n                (intermediate). The `init_reference` has shape (bs,\n                num_queries, 4) when `as_two_stage` of the detector is `True`,\n                otherwise (bs, num_queries, 2). Each `inter_reference` has\n                shape (bs, num_queries, 4) when `with_box_refine` of the\n                detector is `True`, otherwise (bs, num_queries, 2). The\n                coordinates are arranged as (cx, cy) when the last dimension is\n                2, and (cx, cy, w, h) when it is 4.\n            enc_outputs_class (Tensor): The score of each point on encode\n                feature map, has shape (bs, num_feat_points, cls_out_channels).\n                Only when `as_two_stage` is `True` it would be passed in,\n                otherwise it would be `None`.\n            enc_outputs_coord (Tensor): The proposal generate from the encode\n                feature map, has shape (bs, num_feat_points, 4) with the last\n                dimension arranged as (cx, cy, w, h). Only when `as_two_stage`\n                is `True` it would be passed in, otherwise it would be `None`.\n            batch_data_samples (list[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states, references)\n        loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,\n                              batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*loss_inputs)\n        return losses\n\n    def loss_by_feat(\n        self,\n        all_layers_cls_scores: Tensor,\n        all_layers_bbox_preds: Tensor,\n        enc_cls_scores: Tensor,\n        enc_bbox_preds: Tensor,\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Loss function.\n\n        Args:\n            all_layers_cls_scores (Tensor): Classification scores of all\n                decoder layers, has shape (num_decoder_layers, bs, num_queries,\n                cls_out_channels).\n            all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n                layers. Each is a 4D-tensor with normalized coordinate format\n                (cx, cy, w, h) and has shape (num_decoder_layers, bs,\n                num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h).\n            enc_cls_scores (Tensor): The score of each point on encode\n                feature map, has shape (bs, num_feat_points, cls_out_channels).\n                Only when `as_two_stage` is `True` it would be passes in,\n                otherwise, it would be `None`.\n            enc_bbox_preds (Tensor): The proposal generate from the encode\n                feature map, has shape (bs, num_feat_points, 4) with the last\n                dimension arranged as (cx, cy, w, h). Only when `as_two_stage`\n                is `True` it would be passed in, otherwise it would be `None`.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        loss_dict = super().loss_by_feat(all_layers_cls_scores,\n                                         all_layers_bbox_preds,\n                                         batch_gt_instances, batch_img_metas,\n                                         batch_gt_instances_ignore)\n\n        # loss of proposal generated from encode feature map.\n        if enc_cls_scores is not None:\n            proposal_gt_instances = copy.deepcopy(batch_gt_instances)\n            for i in range(len(proposal_gt_instances)):\n                proposal_gt_instances[i].labels = torch.zeros_like(\n                    proposal_gt_instances[i].labels)\n            enc_loss_cls, enc_losses_bbox, enc_losses_iou = \\\n                self.loss_by_feat_single(\n                    enc_cls_scores, enc_bbox_preds,\n                    batch_gt_instances=proposal_gt_instances,\n                    batch_img_metas=batch_img_metas)\n            loss_dict['enc_loss_cls'] = enc_loss_cls\n            loss_dict['enc_loss_bbox'] = enc_losses_bbox\n            loss_dict['enc_loss_iou'] = enc_losses_iou\n        return loss_dict\n\n    def predict(self,\n                hidden_states: Tensor,\n                references: List[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> InstanceList:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the queries of the upstream network.\n\n        Args:\n            hidden_states (Tensor): Hidden states output from each decoder\n                layer, has shape (num_decoder_layers, num_queries, bs, dim).\n            references (list[Tensor]): List of the reference from the decoder.\n                The first reference is the `init_reference` (initial) and the\n                other num_decoder_layers(6) references are `inter_references`\n                (intermediate). The `init_reference` has shape (bs,\n                num_queries, 4) when `as_two_stage` of the detector is `True`,\n                otherwise (bs, num_queries, 2). Each `inter_reference` has\n                shape (bs, num_queries, 4) when `with_box_refine` of the\n                detector is `True`, otherwise (bs, num_queries, 2). The\n                coordinates are arranged as (cx, cy) when the last dimension is\n                2, and (cx, cy, w, h) when it is 4.\n            batch_data_samples (list[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): If `True`, return boxes in original\n                image space. 
Defaults to `True`.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        outs = self(hidden_states, references)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n        return predictions\n\n    def predict_by_feat(self,\n                        all_layers_cls_scores: Tensor,\n                        all_layers_bbox_preds: Tensor,\n                        batch_img_metas: List[Dict],\n                        rescale: bool = False) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Args:\n            all_layers_cls_scores (Tensor): Classification scores of all\n                decoder layers, has shape (num_decoder_layers, bs, num_queries,\n                cls_out_channels).\n            all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n                layers. Each is a 4D-tensor with normalized coordinate format\n                (cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries,\n                4) with the last dimension arranged as (cx, cy, w, h).\n            batch_img_metas (list[dict]): Meta information of each image.\n            rescale (bool, optional): If `True`, return boxes in original\n                image space. Default `False`.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        cls_scores = all_layers_cls_scores[-1]\n        bbox_preds = all_layers_bbox_preds[-1]\n\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score = cls_scores[img_id]\n            bbox_pred = bbox_preds[img_id]\n            img_meta = batch_img_metas[img_id]\n            results = self._predict_by_feat_single(cls_score, bbox_pred,\n                                                   img_meta, rescale)\n            result_list.append(results)\n        return result_list\n"
  },
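In `DeformableDETRHead.forward`, each decoder layer's regression branch predicts an offset in logit space: the reference box is mapped through `inverse_sigmoid`, added to the raw regression output (all four coordinates when the reference already carries width and height, otherwise only the center), and squashed back with `sigmoid`. A standalone sketch of that refinement step, with a re-implemented `inverse_sigmoid` assumed to behave like the helper imported from `..layers`, could be:

import torch

def inverse_sigmoid(x, eps=1e-5):
    # Numerically-stable logit; assumed to mirror mmdet's `inverse_sigmoid`.
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

def refine_boxes(reference, reg_delta):
    """Apply one decoder layer's regression output on top of its reference.

    `reference` is in normalized (0, 1) space with 2 (cx, cy) or
    4 (cx, cy, w, h) coordinates; `reg_delta` always has 4 channels.
    """
    ref_logit = inverse_sigmoid(reference)
    if reference.shape[-1] == 4:
        out = reg_delta + ref_logit          # refine all four coordinates
    else:
        out = reg_delta.clone()
        out[..., :2] += ref_logit            # refine only the box center
    return out.sigmoid()                     # back to normalized (cx, cy, w, h)

# Toy query: a roughly centered reference box and a small regression nudge.
reference = torch.tensor([[0.50, 0.50, 0.20, 0.30]])
reg_delta = torch.tensor([[0.10, -0.10, 0.00, 0.05]])
print(refine_boxes(reference, reg_delta))

This is why the head checks `reference.shape[-1]`: with `as_two_stage`/`with_box_refine` the references are full boxes, otherwise they are 2-d points and only the center is refined.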
  {
    "path": "mmdet/models/dense_heads/dense_test_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\nimport warnings\nfrom inspect import signature\n\nimport torch\nfrom mmcv.ops import batched_nms\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.structures.bbox import bbox_mapping_back\nfrom ..test_time_augs import merge_aug_proposals\n\nif sys.version_info >= (3, 7):\n    from mmdet.utils.contextmanagers import completed\n\n\nclass BBoxTestMixin(object):\n    \"\"\"Mixin class for testing det bboxes via DenseHead.\"\"\"\n\n    def simple_test_bboxes(self, feats, img_metas, rescale=False):\n        \"\"\"Test det bboxes without test-time augmentation, can be applied in\n        DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n        etc.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each\n                image after the post process. \\\n                Each item usually contains following keys. \\\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance,)\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances,).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        warnings.warn('You are calling `simple_test_bboxes` in '\n                      '`dense_test_mixins`, but the `dense_test_mixins`'\n                      'will be deprecated soon. Please use '\n                      '`simple_test` instead.')\n        outs = self.forward(feats)\n        results_list = self.get_results(\n            *outs, img_metas=img_metas, rescale=rescale)\n        return results_list\n\n    def aug_test_bboxes(self, feats, img_metas, rescale=False):\n        \"\"\"Test det bboxes with test time augmentation, can be applied in\n        DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n        etc.\n\n        Args:\n            feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n                The shape of the second tensor in the tuple is ``labels``\n                with shape (n,). The length of list should always be 1.\n        \"\"\"\n\n        warnings.warn('You are calling `aug_test_bboxes` in '\n                      '`dense_test_mixins`, but the `dense_test_mixins`'\n                      'will be deprecated soon. 
Please use '\n                      '`aug_test` instead.')\n        # check with_nms argument\n        gb_sig = signature(self.get_results)\n        gb_args = [p.name for p in gb_sig.parameters.values()]\n        gbs_sig = signature(self._get_results_single)\n        gbs_args = [p.name for p in gbs_sig.parameters.values()]\n        assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \\\n            f'{self.__class__.__name__}' \\\n            ' does not support test-time augmentation'\n\n        aug_bboxes = []\n        aug_scores = []\n        aug_labels = []\n        for x, img_meta in zip(feats, img_metas):\n            # only one image in the batch\n            outs = self.forward(x)\n            bbox_outputs = self.get_results(\n                *outs,\n                img_metas=img_meta,\n                cfg=self.test_cfg,\n                rescale=False,\n                with_nms=False)[0]\n            aug_bboxes.append(bbox_outputs.bboxes)\n            aug_scores.append(bbox_outputs.scores)\n            if len(bbox_outputs) >= 3:\n                aug_labels.append(bbox_outputs.labels)\n\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = self.merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas)\n        merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None\n\n        if merged_bboxes.numel() == 0:\n            det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)\n            return [\n                (det_bboxes, merged_labels),\n            ]\n\n        det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,\n                                            merged_labels, self.test_cfg.nms)\n        det_bboxes = det_bboxes[:self.test_cfg.max_per_img]\n        det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]\n\n        if rescale:\n            _det_bboxes = det_bboxes\n        else:\n            _det_bboxes = det_bboxes.clone()\n            _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n                img_metas[0][0]['scale_factor'])\n\n        results = InstanceData()\n        results.bboxes = _det_bboxes[:, :4]\n        results.scores = _det_bboxes[:, 4]\n        results.labels = det_labels\n        return [results]\n\n    def aug_test_rpn(self, feats, img_metas):\n        \"\"\"Test with augmentation for only for ``RPNHead`` and its variants,\n        e.g., ``GARPNHead``, etc.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                        a 4D-tensor.\n            img_metas (list[dict]): Meta info of each image.\n\n        Returns:\n            list[Tensor]: Proposals of each image, each item has shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n        \"\"\"\n        samples_per_gpu = len(img_metas[0])\n        aug_proposals = [[] for _ in range(samples_per_gpu)]\n        for x, img_meta in zip(feats, img_metas):\n            results_list = self.simple_test_rpn(x, img_meta)\n            for i, results in enumerate(results_list):\n                proposals = torch.cat(\n                    [results.bboxes, results.scores[:, None]], dim=-1)\n                aug_proposals[i].append(proposals)\n        # reorganize the order of 'img_metas' to match the dimensions\n        # of 'aug_proposals'\n        aug_img_metas = []\n        for i in range(samples_per_gpu):\n            aug_img_meta = []\n            for j in range(len(img_metas)):\n                
aug_img_meta.append(img_metas[j][i])\n            aug_img_metas.append(aug_img_meta)\n        # after merging, proposals will be rescaled to the original image size\n\n        merged_proposals = []\n        for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas):\n            merged_proposal = merge_aug_proposals(proposals, aug_img_meta,\n                                                  self.test_cfg)\n            results = InstanceData()\n            results.bboxes = merged_proposal[:, :4]\n            results.scores = merged_proposal[:, 4]\n            merged_proposals.append(results)\n        return merged_proposals\n\n    if sys.version_info >= (3, 7):\n\n        async def async_simple_test_rpn(self, x, img_metas):\n            sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)\n            async with completed(\n                    __name__, 'rpn_head_forward',\n                    sleep_interval=sleep_interval):\n                rpn_outs = self(x)\n\n            proposal_list = self.get_results(*rpn_outs, img_metas=img_metas)\n            return proposal_list\n\n    def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):\n        \"\"\"Merge augmented detection bboxes and scores.\n\n        Args:\n            aug_bboxes (list[Tensor]): shape (n, 4*#class)\n            aug_scores (list[Tensor] or None): shape (n, #class)\n            img_shapes (list[Tensor]): shape (3, ).\n\n        Returns:\n            tuple[Tensor]: ``bboxes`` with shape (n,4), where\n            4 represent (tl_x, tl_y, br_x, br_y)\n            and ``scores`` with shape (n,).\n        \"\"\"\n        recovered_bboxes = []\n        for bboxes, img_info in zip(aug_bboxes, img_metas):\n            img_shape = img_info[0]['img_shape']\n            scale_factor = img_info[0]['scale_factor']\n            flip = img_info[0]['flip']\n            flip_direction = img_info[0]['flip_direction']\n            bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,\n                                       flip_direction)\n            recovered_bboxes.append(bboxes)\n        bboxes = torch.cat(recovered_bboxes, dim=0)\n        if aug_scores is None:\n            return bboxes\n        else:\n            scores = torch.cat(aug_scores, dim=0)\n            return bboxes, scores\n"
  },
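`merge_aug_bboxes` relies on `bbox_mapping_back` to undo the test-time augmentation (flip first, then the resize scale) before the per-view detections are concatenated and passed to `batched_nms`. A simplified sketch of that mapping, assuming a horizontal flip and the usual 4-element `scale_factor` meta, is shown below; it is illustrative only and skips the vertical/diagonal flip branches of the real helper:

import torch

def map_boxes_back(bboxes, img_shape, scale_factor, flip,
                   flip_direction='horizontal'):
    """Simplified stand-in for `bbox_mapping_back`: undo flip, then scaling.

    Only the horizontal-flip case is handled; boxes are (x1, y1, x2, y2).
    """
    bboxes = bboxes.clone()
    if flip and flip_direction == 'horizontal':
        w = img_shape[1]
        x1, x2 = bboxes[:, 0].clone(), bboxes[:, 2].clone()
        bboxes[:, 0] = w - x2
        bboxes[:, 2] = w - x1
    # scale_factor is assumed to be (w_scale, h_scale, w_scale, h_scale).
    return bboxes / bboxes.new_tensor(scale_factor)

# Two augmented views of the same image: one flipped, both resized by 2x.
aug_bboxes = [
    torch.tensor([[20., 10., 60., 50.]]),     # plain resized view
    torch.tensor([[140., 10., 180., 50.]]),   # horizontally flipped view
]
img_shape = (100, 200)                        # (h, w) of the augmented images
scale_factor = (2., 2., 2., 2.)
flips = [False, True]

recovered = [
    map_boxes_back(b, img_shape, scale_factor, f)
    for b, f in zip(aug_bboxes, flips)
]
merged = torch.cat(recovered, dim=0)
print(merged)   # both rows land on the same box in original-image coordinates

Once all views agree in original-image coordinates, the scores can simply be concatenated alongside the boxes, which is exactly what `merge_aug_bboxes` returns to the NMS step.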
  {
    "path": "mmdet/models/dense_heads/detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Linear\nfrom mmcv.cnn.bricks.transformer import FFN\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh\nfrom mmdet.utils import (ConfigType, InstanceList, OptInstanceList,\n                         OptMultiConfig, reduce_mean)\nfrom ..utils import multi_apply\n\n\n@MODELS.register_module()\nclass DETRHead(BaseModule):\n    r\"\"\"Head of DETR. DETR:End-to-End Object Detection with Transformers.\n\n    More details can be found in the `paper\n    <https://arxiv.org/pdf/2005.12872>`_ .\n\n    Args:\n        num_classes (int): Number of categories excluding the background.\n        embed_dims (int): The dims of Transformer embedding.\n        num_reg_fcs (int): Number of fully-connected layers used in `FFN`,\n            which is then used for the regression head. Defaults to 2.\n        sync_cls_avg_factor (bool): Whether to sync the `avg_factor` of\n            all ranks. Default to `False`.\n        loss_cls (:obj:`ConfigDict` or dict): Config of the classification\n            loss. Defaults to `CrossEntropyLoss`.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of the regression bbox\n            loss. Defaults to `L1Loss`.\n        loss_iou (:obj:`ConfigDict` or dict): Config of the regression iou\n            loss. Defaults to `GIoULoss`.\n        train_cfg (:obj:`ConfigDict` or dict): Training config of transformer\n            head.\n        test_cfg (:obj:`ConfigDict` or dict): Testing config of transformer\n            head.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    _version = 2\n\n    def __init__(\n            self,\n            num_classes: int,\n            embed_dims: int = 256,\n            num_reg_fcs: int = 2,\n            sync_cls_avg_factor: bool = False,\n            loss_cls: ConfigType = dict(\n                type='CrossEntropyLoss',\n                bg_cls_weight=0.1,\n                use_sigmoid=False,\n                loss_weight=1.0,\n                class_weight=1.0),\n            loss_bbox: ConfigType = dict(type='L1Loss', loss_weight=5.0),\n            loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0),\n            train_cfg: ConfigType = dict(\n                assigner=dict(\n                    type='HungarianAssigner',\n                    match_costs=[\n                        dict(type='ClassificationCost', weight=1.),\n                        dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),\n                        dict(type='IoUCost', iou_mode='giou', weight=2.0)\n                    ])),\n            test_cfg: ConfigType = dict(max_per_img=100),\n            init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.bg_cls_weight = 0\n        self.sync_cls_avg_factor = sync_cls_avg_factor\n        class_weight = loss_cls.get('class_weight', None)\n        if class_weight is not None and (self.__class__ is DETRHead):\n            assert isinstance(class_weight, float), 'Expected ' \\\n                'class_weight to have type float. 
Found ' \\\n                f'{type(class_weight)}.'\n            # NOTE following the official DETR repo, bg_cls_weight means\n            # relative classification weight of the no-object class.\n            bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)\n            assert isinstance(bg_cls_weight, float), 'Expected ' \\\n                'bg_cls_weight to have type float. Found ' \\\n                f'{type(bg_cls_weight)}.'\n            class_weight = torch.ones(num_classes + 1) * class_weight\n            # set background class as the last indice\n            class_weight[num_classes] = bg_cls_weight\n            loss_cls.update({'class_weight': class_weight})\n            if 'bg_cls_weight' in loss_cls:\n                loss_cls.pop('bg_cls_weight')\n            self.bg_cls_weight = bg_cls_weight\n\n        if train_cfg:\n            assert 'assigner' in train_cfg, 'assigner should be provided ' \\\n                                            'when train_cfg is set.'\n            assigner = train_cfg['assigner']\n            self.assigner = TASK_UTILS.build(assigner)\n            if train_cfg.get('sampler', None) is not None:\n                raise RuntimeError('DETR do not build sampler.')\n        self.num_classes = num_classes\n        self.embed_dims = embed_dims\n        self.num_reg_fcs = num_reg_fcs\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n        self.loss_iou = MODELS.build(loss_iou)\n\n        if self.loss_cls.use_sigmoid:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the transformer head.\"\"\"\n        # cls branch\n        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n        # reg branch\n        self.activate = nn.ReLU()\n        self.reg_ffn = FFN(\n            self.embed_dims,\n            self.embed_dims,\n            self.num_reg_fcs,\n            dict(type='ReLU', inplace=True),\n            dropout=0.0,\n            add_residual=False)\n        # NOTE the activations of reg_branch here is the same as\n        # those in transformer, but they are actually different\n        # in DAB-DETR (prelu in transformer and relu in reg_branch)\n        self.fc_reg = Linear(self.embed_dims, 4)\n\n    def forward(self, hidden_states: Tensor) -> Tuple[Tensor]:\n        \"\"\"\"Forward function.\n\n        Args:\n            hidden_states (Tensor): Features from transformer decoder. 
If\n                `return_intermediate_dec` in detr.py is True output has shape\n                (num_decoder_layers, bs, num_queries, dim), else has shape\n                (1, bs, num_queries, dim) which only contains the last layer\n                outputs.\n        Returns:\n            tuple[Tensor]: results of head containing the following tensor.\n\n            - layers_cls_scores (Tensor): Outputs from the classification head,\n              shape (num_decoder_layers, bs, num_queries, cls_out_channels).\n              Note cls_out_channels should include background.\n            - layers_bbox_preds (Tensor): Sigmoid outputs from the regression\n              head with normalized coordinate format (cx, cy, w, h), has shape\n              (num_decoder_layers, bs, num_queries, 4).\n        \"\"\"\n        layers_cls_scores = self.fc_cls(hidden_states)\n        layers_bbox_preds = self.fc_reg(\n            self.activate(self.reg_ffn(hidden_states))).sigmoid()\n        return layers_cls_scores, layers_bbox_preds\n\n    def loss(self, hidden_states: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            hidden_states (Tensor): Feature from the transformer decoder, has\n                shape (num_decoder_layers, bs, num_queries, cls_out_channels)\n                or (num_decoder_layers, num_queries, bs, cls_out_channels).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states)\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*loss_inputs)\n        return losses\n\n    def loss_by_feat(\n        self,\n        all_layers_cls_scores: Tensor,\n        all_layers_bbox_preds: Tensor,\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"\"Loss function.\n\n        Only outputs from the last feature level are used for computing\n        losses by default.\n\n        Args:\n            all_layers_cls_scores (Tensor): Classification outputs\n                of each decoder layers. Each is a 4D-tensor, has shape\n                (num_decoder_layers, bs, num_queries, cls_out_channels).\n            all_layers_bbox_preds (Tensor): Sigmoid regression\n                outputs of each decoder layers. Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                (num_decoder_layers, bs, num_queries, 4).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert batch_gt_instances_ignore is None, \\\n            f'{self.__class__.__name__} only supports ' \\\n            'for batch_gt_instances_ignore setting to None.'\n\n        losses_cls, losses_bbox, losses_iou = multi_apply(\n            self.loss_by_feat_single,\n            all_layers_cls_scores,\n            all_layers_bbox_preds,\n            batch_gt_instances=batch_gt_instances,\n            batch_img_metas=batch_img_metas)\n\n        loss_dict = dict()\n        # loss from the last decoder layer\n        loss_dict['loss_cls'] = losses_cls[-1]\n        loss_dict['loss_bbox'] = losses_bbox[-1]\n        loss_dict['loss_iou'] = losses_iou[-1]\n        # loss from other decoder layers\n        num_dec_layer = 0\n        for loss_cls_i, loss_bbox_i, loss_iou_i in \\\n                zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]):\n            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i\n            loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i\n            loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i\n            num_dec_layer += 1\n        return loss_dict\n\n    def loss_by_feat_single(self, cls_scores: Tensor, bbox_preds: Tensor,\n                            batch_gt_instances: InstanceList,\n                            batch_img_metas: List[dict]) -> Tuple[Tensor]:\n        \"\"\"Loss function for outputs from a single decoder layer of a single\n        feature level.\n\n        Args:\n            cls_scores (Tensor): Box score logits from a single decoder layer\n                for all images, has shape (bs, num_queries, cls_out_channels).\n            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer\n                for all images, with normalized coordinate (cx, cy, w, h) and\n                shape (bs, num_queries, 4).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n\n        Returns:\n            Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and\n            `loss_iou`.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]\n        cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,\n                                           batch_gt_instances, batch_img_metas)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        labels = torch.cat(labels_list, 0)\n        label_weights = torch.cat(label_weights_list, 0)\n        bbox_targets = torch.cat(bbox_targets_list, 0)\n        bbox_weights = torch.cat(bbox_weights_list, 0)\n\n        # classification loss\n        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)\n        # construct weighted avg_factor to match with the official DETR repo\n        cls_avg_factor = num_total_pos * 1.0 + \\\n            num_total_neg * self.bg_cls_weight\n        if self.sync_cls_avg_factor:\n            cls_avg_factor = reduce_mean(\n                cls_scores.new_tensor([cls_avg_factor]))\n        cls_avg_factor = max(cls_avg_factor, 1)\n\n        loss_cls = self.loss_cls(\n            cls_scores, labels, label_weights, avg_factor=cls_avg_factor)\n\n        # Compute the average number of gt boxes across all gpus, for\n        # normalization purposes\n        num_total_pos = loss_cls.new_tensor([num_total_pos])\n        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()\n\n        # construct factors used for rescale bboxes\n        factors = []\n        for img_meta, bbox_pred in zip(batch_img_metas, bbox_preds):\n            img_h, img_w, = img_meta['img_shape']\n            factor = bbox_pred.new_tensor([img_w, img_h, img_w,\n                                           img_h]).unsqueeze(0).repeat(\n                                               bbox_pred.size(0), 1)\n            factors.append(factor)\n        factors = torch.cat(factors, 0)\n\n        # DETR regress the relative position of boxes (cxcywh) in the image,\n        # thus the learning target is normalized by the image size. 
So here\n        # we need to re-scale them for calculating IoU loss\n        bbox_preds = bbox_preds.reshape(-1, 4)\n        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors\n        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors\n\n        # regression IoU loss, defaultly GIoU loss\n        loss_iou = self.loss_iou(\n            bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)\n\n        # regression L1 loss\n        loss_bbox = self.loss_bbox(\n            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)\n        return loss_cls, loss_bbox, loss_iou\n\n    def get_targets(self, cls_scores_list: List[Tensor],\n                    bbox_preds_list: List[Tensor],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict]) -> tuple:\n        \"\"\"Compute regression and classification targets for a batch image.\n\n        Outputs from a single decoder layer of a single feature level are used.\n\n        Args:\n            cls_scores_list (list[Tensor]): Box score logits from a single\n                decoder layer for each image, has shape [num_queries,\n                cls_out_channels].\n            bbox_preds_list (list[Tensor]): Sigmoid outputs from a single\n                decoder layer for each image, with normalized coordinate\n                (cx, cy, w, h) and shape [num_queries, 4].\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n\n        Returns:\n            tuple: a tuple containing the following targets.\n\n            - labels_list (list[Tensor]): Labels for all images.\n            - label_weights_list (list[Tensor]): Label weights for all images.\n            - bbox_targets_list (list[Tensor]): BBox targets for all images.\n            - bbox_weights_list (list[Tensor]): BBox weights for all images.\n            - num_total_pos (int): Number of positive samples in all images.\n            - num_total_neg (int): Number of negative samples in all images.\n        \"\"\"\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         pos_inds_list,\n         neg_inds_list) = multi_apply(self._get_targets_single,\n                                      cls_scores_list, bbox_preds_list,\n                                      batch_gt_instances, batch_img_metas)\n        num_total_pos = sum((inds.numel() for inds in pos_inds_list))\n        num_total_neg = sum((inds.numel() for inds in neg_inds_list))\n        return (labels_list, label_weights_list, bbox_targets_list,\n                bbox_weights_list, num_total_pos, num_total_neg)\n\n    def _get_targets_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict) -> tuple:\n        \"\"\"Compute regression and classification targets for one image.\n\n        Outputs from a single decoder layer of a single feature level are used.\n\n        Args:\n            cls_score (Tensor): Box score logits from a single decoder layer\n                for one image. 
Shape [num_queries, cls_out_channels].\n            bbox_pred (Tensor): Sigmoid outputs from a single decoder layer\n                for one image, with normalized coordinate (cx, cy, w, h) and\n                shape [num_queries, 4].\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for one image.\n\n        Returns:\n            tuple[Tensor]: a tuple containing the following for one image.\n\n            - labels (Tensor): Labels of each image.\n            - label_weights (Tensor]): Label weights of each image.\n            - bbox_targets (Tensor): BBox targets of each image.\n            - bbox_weights (Tensor): BBox weights of each image.\n            - pos_inds (Tensor): Sampled positive indices for each image.\n            - neg_inds (Tensor): Sampled negative indices for each image.\n        \"\"\"\n        img_h, img_w = img_meta['img_shape']\n        factor = bbox_pred.new_tensor([img_w, img_h, img_w,\n                                       img_h]).unsqueeze(0)\n        num_bboxes = bbox_pred.size(0)\n        # convert bbox_pred from xywh, normalized to xyxy, unnormalized\n        bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)\n        bbox_pred = bbox_pred * factor\n\n        pred_instances = InstanceData(scores=cls_score, bboxes=bbox_pred)\n        # assigner and sampler\n        assign_result = self.assigner.assign(\n            pred_instances=pred_instances,\n            gt_instances=gt_instances,\n            img_meta=img_meta)\n\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        pos_inds = torch.nonzero(\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\n        neg_inds = torch.nonzero(\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\n        pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n        pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds.long(), :]\n\n        # label targets\n        labels = gt_bboxes.new_full((num_bboxes, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[pos_assigned_gt_inds]\n        label_weights = gt_bboxes.new_ones(num_bboxes)\n\n        # bbox targets\n        bbox_targets = torch.zeros_like(bbox_pred)\n        bbox_weights = torch.zeros_like(bbox_pred)\n        bbox_weights[pos_inds] = 1.0\n\n        # DETR regress the relative position of boxes (cxcywh) in the image.\n        # Thus the learning target should be normalized by the image size, also\n        # the box format should be converted from defaultly x1y1x2y2 to cxcywh.\n        pos_gt_bboxes_normalized = pos_gt_bboxes / factor\n        pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)\n        bbox_targets[pos_inds] = pos_gt_bboxes_targets\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds)\n\n    def loss_and_predict(\n            self, hidden_states: Tuple[Tensor],\n            batch_data_samples: SampleList) -> Tuple[dict, InstanceList]:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples. 
Over-write because\n        img_metas are needed as inputs for bbox_head.\n\n        Args:\n            hidden_states (tuple[Tensor]): Feature from the transformer\n                decoder, has shape (num_decoder_layers, bs, num_queries, dim).\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple: the return value is a tuple contains:\n\n            - losses: (dict[str, Tensor]): A dictionary of loss components.\n            - predictions (list[:obj:`InstanceData`]): Detection\n              results of each image after the post process.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states)\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(*loss_inputs)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas)\n        return losses, predictions\n\n    def predict(self,\n                hidden_states: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network. Over-write\n        because img_metas are needed as inputs for bbox_head.\n\n        Args:\n            hidden_states (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        last_layer_hidden_state = hidden_states[-1].unsqueeze(0)\n        outs = self(last_layer_hidden_state)\n\n        predictions = self.predict_by_feat(\n            *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n\n        return predictions\n\n    def predict_by_feat(self,\n                        layer_cls_scores: Tensor,\n                        layer_bbox_preds: Tensor,\n                        batch_img_metas: List[dict],\n                        rescale: bool = True) -> InstanceList:\n        \"\"\"Transform network outputs for a batch into bbox predictions.\n\n        Args:\n            layer_cls_scores (Tensor): Classification outputs of the last or\n                all decoder layer. Each is a 4D-tensor, has shape\n                (num_decoder_layers, bs, num_queries, cls_out_channels).\n            layer_bbox_preds (Tensor): Sigmoid regression outputs of the last\n                or all decoder layer. 
Each is a 4D-tensor with normalized\n                coordinate format (cx, cy, w, h) and shape\n                (num_decoder_layers, bs, num_queries, 4).\n            batch_img_metas (list[dict]): Meta information of each image.\n            rescale (bool, optional): If `True`, return boxes in original\n                image space. Defaults to `True`.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        # NOTE only using outputs from the last feature level,\n        # and only the outputs from the last decoder layer is used.\n        cls_scores = layer_cls_scores[-1]\n        bbox_preds = layer_bbox_preds[-1]\n\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score = cls_scores[img_id]\n            bbox_pred = bbox_preds[img_id]\n            img_meta = batch_img_metas[img_id]\n            results = self._predict_by_feat_single(cls_score, bbox_pred,\n                                                   img_meta, rescale)\n            result_list.append(results)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_score: Tensor,\n                                bbox_pred: Tensor,\n                                img_meta: dict,\n                                rescale: bool = True) -> InstanceData:\n        \"\"\"Transform outputs from the last decoder layer into bbox predictions\n        for each image.\n\n        Args:\n            cls_score (Tensor): Box score logits from the last decoder layer\n                for each image. Shape [num_queries, cls_out_channels].\n            bbox_pred (Tensor): Sigmoid outputs from the last decoder layer\n                for each image, with coordinate format (cx, cy, w, h) and\n                shape [num_queries, 4].\n            img_meta (dict): Image meta info.\n            rescale (bool): If True, return boxes in original image\n                space. 
Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains the following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instances, ).\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arranged as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_score) == len(bbox_pred)  # num_queries\n        max_per_img = self.test_cfg.get('max_per_img', len(cls_score))\n        img_shape = img_meta['img_shape']\n        # exclude background\n        if self.loss_cls.use_sigmoid:\n            cls_score = cls_score.sigmoid()\n            scores, indexes = cls_score.view(-1).topk(max_per_img)\n            det_labels = indexes % self.num_classes\n            bbox_index = indexes // self.num_classes\n            bbox_pred = bbox_pred[bbox_index]\n        else:\n            scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)\n            scores, bbox_index = scores.topk(max_per_img)\n            bbox_pred = bbox_pred[bbox_index]\n            det_labels = det_labels[bbox_index]\n\n        det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)\n        det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]\n        det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]\n        det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])\n        det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            det_bboxes /= det_bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n\n        results = InstanceData()\n        results.bboxes = det_bboxes\n        results.scores = scores\n        results.labels = det_labels\n        return results\n"
  },
  {
    "path": "mmdet/models/dense_heads/dino_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Tuple\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh\nfrom mmdet.utils import InstanceList, OptInstanceList, reduce_mean\nfrom ..utils import multi_apply\nfrom .deformable_detr_head import DeformableDETRHead\n\n\n@MODELS.register_module()\nclass DINOHead(DeformableDETRHead):\n    r\"\"\"Head of the DINO: DETR with Improved DeNoising Anchor Boxes\n    for End-to-End Object Detection\n\n    Code is modified from the `official github repo\n    <https://github.com/IDEA-Research/DINO>`_.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2203.03605>`_ .\n    \"\"\"\n\n    def loss(self, hidden_states: Tensor, references: List[Tensor],\n             enc_outputs_class: Tensor, enc_outputs_coord: Tensor,\n             batch_data_samples: SampleList, dn_meta: Dict[str, int]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the queries of the upstream network.\n\n        Args:\n            hidden_states (Tensor): Hidden states output from each decoder\n                layer, has shape (num_decoder_layers, bs, num_queries_total,\n                dim), where `num_queries_total` is the sum of\n                `num_denoising_queries` and `num_matching_queries` when\n                `self.training` is `True`, else `num_matching_queries`.\n            references (list[Tensor]): List of the reference from the decoder.\n                The first reference is the `init_reference` (initial) and the\n                other num_decoder_layers(6) references are `inter_references`\n                (intermediate). The `init_reference` has shape (bs,\n                num_queries_total, 4) and each `inter_reference` has shape\n                (bs, num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h).\n            enc_outputs_class (Tensor): The score of each point on encode\n                feature map, has shape (bs, num_feat_points, cls_out_channels).\n            enc_outputs_coord (Tensor): The proposal generate from the\n                encode feature map, has shape (bs, num_feat_points, 4) with the\n                last dimension arranged as (cx, cy, w, h).\n            batch_data_samples (list[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. 
It will be used for split outputs of\n              denoising and matching parts and loss calculation.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        batch_gt_instances = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n\n        outs = self(hidden_states, references)\n        loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,\n                              batch_gt_instances, batch_img_metas, dn_meta)\n        losses = self.loss_by_feat(*loss_inputs)\n        return losses\n\n    def loss_by_feat(\n        self,\n        all_layers_cls_scores: Tensor,\n        all_layers_bbox_preds: Tensor,\n        enc_cls_scores: Tensor,\n        enc_bbox_preds: Tensor,\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        dn_meta: Dict[str, int],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Loss function.\n\n        Args:\n            all_layers_cls_scores (Tensor): Classification scores of all\n                decoder layers, has shape (num_decoder_layers, bs,\n                num_queries_total, cls_out_channels), where\n                `num_queries_total` is the sum of `num_denoising_queries`\n                and `num_matching_queries`.\n            all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n                layers. Each is a 4D-tensor with normalized coordinate format\n                (cx, cy, w, h) and has shape (num_decoder_layers, bs,\n                num_queries_total, 4).\n            enc_cls_scores (Tensor): The score of each point on encode\n                feature map, has shape (bs, num_feat_points, cls_out_channels).\n            enc_bbox_preds (Tensor): The proposal generate from the encode\n                feature map, has shape (bs, num_feat_points, 4) with the last\n                dimension arranged as (cx, cy, w, h).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n                group collation, including 'num_denoising_queries' and\n                'num_denoising_groups'. It will be used for split outputs of\n                denoising and matching parts and loss calculation.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        # extract denoising and matching part of outputs\n        (all_layers_matching_cls_scores, all_layers_matching_bbox_preds,\n         all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds) = \\\n            self.split_outputs(\n                all_layers_cls_scores, all_layers_bbox_preds, dn_meta)\n\n        loss_dict = super(DeformableDETRHead, self).loss_by_feat(\n            all_layers_matching_cls_scores, all_layers_matching_bbox_preds,\n            batch_gt_instances, batch_img_metas, batch_gt_instances_ignore)\n        # NOTE DETRHead.loss_by_feat but not DeformableDETRHead.loss_by_feat\n        # is called, because the encoder loss calculations are different\n        # between DINO and DeformableDETR.\n\n        # loss of proposal generated from encode feature map.\n        if enc_cls_scores is not None:\n            # NOTE The enc_loss calculation of the DINO is\n            # different from that of Deformable DETR.\n            enc_loss_cls, enc_losses_bbox, enc_losses_iou = \\\n                self.loss_by_feat_single(\n                    enc_cls_scores, enc_bbox_preds,\n                    batch_gt_instances=batch_gt_instances,\n                    batch_img_metas=batch_img_metas)\n            loss_dict['enc_loss_cls'] = enc_loss_cls\n            loss_dict['enc_loss_bbox'] = enc_losses_bbox\n            loss_dict['enc_loss_iou'] = enc_losses_iou\n\n        if all_layers_denoising_cls_scores is not None:\n            # calculate denoising loss from all decoder layers\n            dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(\n                all_layers_denoising_cls_scores,\n                all_layers_denoising_bbox_preds,\n                batch_gt_instances=batch_gt_instances,\n                batch_img_metas=batch_img_metas,\n                dn_meta=dn_meta)\n            # collate denoising loss\n            loss_dict['dn_loss_cls'] = dn_losses_cls[-1]\n            loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]\n            loss_dict['dn_loss_iou'] = dn_losses_iou[-1]\n            for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \\\n                    enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],\n                                  dn_losses_iou[:-1])):\n                loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i\n                loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i\n                loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i\n        return loss_dict\n\n    def loss_dn(self, all_layers_denoising_cls_scores: Tensor,\n                all_layers_denoising_bbox_preds: Tensor,\n                batch_gt_instances: InstanceList, batch_img_metas: List[dict],\n                dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]:\n        \"\"\"Calculate denoising loss.\n\n        Args:\n            all_layers_denoising_cls_scores (Tensor): Classification scores of\n                all decoder layers in denoising part, has shape (\n                num_decoder_layers, bs, num_denoising_queries,\n                cls_out_channels).\n            all_layers_denoising_bbox_preds (Tensor): Regression outputs of all\n                decoder layers in denoising part. 
Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and has shape\n                (num_decoder_layers, bs, num_denoising_queries, 4).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. It will be used for split outputs of\n              denoising and matching parts and loss calculation.\n\n        Returns:\n            Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou\n            of each decoder layers.\n        \"\"\"\n        return multi_apply(\n            self._loss_dn_single,\n            all_layers_denoising_cls_scores,\n            all_layers_denoising_bbox_preds,\n            batch_gt_instances=batch_gt_instances,\n            batch_img_metas=batch_img_metas,\n            dn_meta=dn_meta)\n\n    def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,\n                        batch_gt_instances: InstanceList,\n                        batch_img_metas: List[dict],\n                        dn_meta: Dict[str, int]) -> Tuple[Tensor]:\n        \"\"\"Denoising loss for outputs from a single decoder layer.\n\n        Args:\n            dn_cls_scores (Tensor): Classification scores of a single decoder\n                layer in denoising part, has shape (bs, num_denoising_queries,\n                cls_out_channels).\n            dn_bbox_preds (Tensor): Regression outputs of a single decoder\n                layer in denoising part. Each is a 4D-tensor with normalized\n                coordinate format (cx, cy, w, h) and has shape\n                (bs, num_denoising_queries, 4).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. 
It will be used for split outputs of\n              denoising and matching parts and loss calculation.\n\n        Returns:\n            Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and\n            `loss_iou`.\n        \"\"\"\n        cls_reg_targets = self.get_dn_targets(batch_gt_instances,\n                                              batch_img_metas, dn_meta)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        labels = torch.cat(labels_list, 0)\n        label_weights = torch.cat(label_weights_list, 0)\n        bbox_targets = torch.cat(bbox_targets_list, 0)\n        bbox_weights = torch.cat(bbox_weights_list, 0)\n\n        # classification loss\n        cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels)\n        # construct weighted avg_factor to match with the official DETR repo\n        cls_avg_factor = \\\n            num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight\n        if self.sync_cls_avg_factor:\n            cls_avg_factor = reduce_mean(\n                cls_scores.new_tensor([cls_avg_factor]))\n        cls_avg_factor = max(cls_avg_factor, 1)\n\n        if len(cls_scores) > 0:\n            loss_cls = self.loss_cls(\n                cls_scores, labels, label_weights, avg_factor=cls_avg_factor)\n        else:\n            loss_cls = torch.zeros(\n                1, dtype=cls_scores.dtype, device=cls_scores.device)\n\n        # Compute the average number of gt boxes across all gpus, for\n        # normalization purposes\n        num_total_pos = loss_cls.new_tensor([num_total_pos])\n        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()\n\n        # construct factors used for rescale bboxes\n        factors = []\n        for img_meta, bbox_pred in zip(batch_img_metas, dn_bbox_preds):\n            img_h, img_w = img_meta['img_shape']\n            factor = bbox_pred.new_tensor([img_w, img_h, img_w,\n                                           img_h]).unsqueeze(0).repeat(\n                                               bbox_pred.size(0), 1)\n            factors.append(factor)\n        factors = torch.cat(factors)\n\n        # DETR regress the relative position of boxes (cxcywh) in the image,\n        # thus the learning target is normalized by the image size. So here\n        # we need to re-scale them for calculating IoU loss\n        bbox_preds = dn_bbox_preds.reshape(-1, 4)\n        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors\n        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors\n\n        # regression IoU loss, defaultly GIoU loss\n        loss_iou = self.loss_iou(\n            bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)\n\n        # regression L1 loss\n        loss_bbox = self.loss_bbox(\n            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)\n        return loss_cls, loss_bbox, loss_iou\n\n    def get_dn_targets(self, batch_gt_instances: InstanceList,\n                       batch_img_metas: dict, dn_meta: Dict[str,\n                                                            int]) -> tuple:\n        \"\"\"Get targets in denoising part for a batch of images.\n\n        Args:\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. It will be used for split outputs of\n              denoising and matching parts and loss calculation.\n\n        Returns:\n            tuple: a tuple containing the following targets.\n\n            - labels_list (list[Tensor]): Labels for all images.\n            - label_weights_list (list[Tensor]): Label weights for all images.\n            - bbox_targets_list (list[Tensor]): BBox targets for all images.\n            - bbox_weights_list (list[Tensor]): BBox weights for all images.\n            - num_total_pos (int): Number of positive samples in all images.\n            - num_total_neg (int): Number of negative samples in all images.\n        \"\"\"\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_dn_targets_single,\n             batch_gt_instances,\n             batch_img_metas,\n             dn_meta=dn_meta)\n        num_total_pos = sum((inds.numel() for inds in pos_inds_list))\n        num_total_neg = sum((inds.numel() for inds in neg_inds_list))\n        return (labels_list, label_weights_list, bbox_targets_list,\n                bbox_weights_list, num_total_pos, num_total_neg)\n\n    def _get_dn_targets_single(self, gt_instances: InstanceData,\n                               img_meta: dict, dn_meta: Dict[str,\n                                                             int]) -> tuple:\n        \"\"\"Get targets in denoising part for one image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for one image.\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. 
It will be used for split outputs of\n              denoising and matching parts and loss calculation.\n\n        Returns:\n            tuple[Tensor]: a tuple containing the following for one image.\n\n            - labels (Tensor): Labels of each image.\n            - label_weights (Tensor]): Label weights of each image.\n            - bbox_targets (Tensor): BBox targets of each image.\n            - bbox_weights (Tensor): BBox weights of each image.\n            - pos_inds (Tensor): Sampled positive indices for each image.\n            - neg_inds (Tensor): Sampled negative indices for each image.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        num_groups = dn_meta['num_denoising_groups']\n        num_denoising_queries = dn_meta['num_denoising_queries']\n        num_queries_each_group = int(num_denoising_queries / num_groups)\n        device = gt_bboxes.device\n\n        if len(gt_labels) > 0:\n            t = torch.arange(len(gt_labels), dtype=torch.long, device=device)\n            t = t.unsqueeze(0).repeat(num_groups, 1)\n            pos_assigned_gt_inds = t.flatten()\n            pos_inds = torch.arange(\n                num_groups, dtype=torch.long, device=device)\n            pos_inds = pos_inds.unsqueeze(1) * num_queries_each_group + t\n            pos_inds = pos_inds.flatten()\n        else:\n            pos_inds = pos_assigned_gt_inds = \\\n                gt_bboxes.new_tensor([], dtype=torch.long)\n\n        neg_inds = pos_inds + num_queries_each_group // 2\n\n        # label targets\n        labels = gt_bboxes.new_full((num_denoising_queries, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[pos_assigned_gt_inds]\n        label_weights = gt_bboxes.new_ones(num_denoising_queries)\n\n        # bbox targets\n        bbox_targets = torch.zeros(num_denoising_queries, 4, device=device)\n        bbox_weights = torch.zeros(num_denoising_queries, 4, device=device)\n        bbox_weights[pos_inds] = 1.0\n        img_h, img_w = img_meta['img_shape']\n\n        # DETR regress the relative position of boxes (cxcywh) in the image.\n        # Thus the learning target should be normalized by the image size, also\n        # the box format should be converted from defaultly x1y1x2y2 to cxcywh.\n        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,\n                                       img_h]).unsqueeze(0)\n        gt_bboxes_normalized = gt_bboxes / factor\n        gt_bboxes_targets = bbox_xyxy_to_cxcywh(gt_bboxes_normalized)\n        bbox_targets[pos_inds] = gt_bboxes_targets.repeat([num_groups, 1])\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds)\n\n    @staticmethod\n    def split_outputs(all_layers_cls_scores: Tensor,\n                      all_layers_bbox_preds: Tensor,\n                      dn_meta: Dict[str, int]) -> Tuple[Tensor]:\n        \"\"\"Split outputs of the denoising part and the matching part.\n\n        For the total outputs of `num_queries_total` length, the former\n        `num_denoising_queries` outputs are from denoising queries, and\n        the rest `num_matching_queries` ones are from matching queries,\n        where `num_queries_total` is the sum of `num_denoising_queries` and\n        `num_matching_queries`.\n\n        Args:\n            all_layers_cls_scores (Tensor): Classification scores of all\n                decoder layers, has shape 
(num_decoder_layers, bs,\n                num_queries_total, cls_out_channels).\n            all_layers_bbox_preds (Tensor): Regression outputs of all decoder\n                layers. Each is a 4D-tensor with normalized coordinate format\n                (cx, cy, w, h) and has shape (num_decoder_layers, bs,\n                num_queries_total, 4).\n            dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'.\n\n        Returns:\n            Tuple[Tensor]: a tuple containing the following outputs.\n\n            - all_layers_matching_cls_scores (Tensor): Classification scores\n              of all decoder layers in matching part, has shape\n              (num_decoder_layers, bs, num_matching_queries, cls_out_channels).\n            - all_layers_matching_bbox_preds (Tensor): Regression outputs of\n              all decoder layers in matching part. Each is a 4D-tensor with\n              normalized coordinate format (cx, cy, w, h) and has shape\n              (num_decoder_layers, bs, num_matching_queries, 4).\n            - all_layers_denoising_cls_scores (Tensor): Classification scores\n              of all decoder layers in denoising part, has shape\n              (num_decoder_layers, bs, num_denoising_queries,\n              cls_out_channels).\n            - all_layers_denoising_bbox_preds (Tensor): Regression outputs of\n              all decoder layers in denoising part. Each is a 4D-tensor with\n              normalized coordinate format (cx, cy, w, h) and has shape\n              (num_decoder_layers, bs, num_denoising_queries, 4).\n        \"\"\"\n        if dn_meta is not None:\n            num_denoising_queries = dn_meta['num_denoising_queries']\n            all_layers_denoising_cls_scores = \\\n                all_layers_cls_scores[:, :, : num_denoising_queries, :]\n            all_layers_denoising_bbox_preds = \\\n                all_layers_bbox_preds[:, :, : num_denoising_queries, :]\n            all_layers_matching_cls_scores = \\\n                all_layers_cls_scores[:, :, num_denoising_queries:, :]\n            all_layers_matching_bbox_preds = \\\n                all_layers_bbox_preds[:, :, num_denoising_queries:, :]\n        else:\n            all_layers_denoising_cls_scores = None\n            all_layers_denoising_bbox_preds = None\n            all_layers_matching_cls_scores = all_layers_cls_scores\n            all_layers_matching_bbox_preds = all_layers_bbox_preds\n        return (all_layers_matching_cls_scores, all_layers_matching_bbox_preds,\n                all_layers_denoising_cls_scores,\n                all_layers_denoising_bbox_preds)\n"
  },
  {
    "path": "mmdet/models/dense_heads/embedding_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_cxcywh_to_xyxy\nfrom mmdet.structures.det_data_sample import SampleList\nfrom mmdet.utils import InstanceList, OptConfigType\n\n\n@MODELS.register_module()\nclass EmbeddingRPNHead(BaseModule):\n    \"\"\"RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .\n\n    Unlike traditional RPNHead, this module does not need FPN input, but just\n    decode `init_proposal_bboxes` and expand the first dimension of\n    `init_proposal_bboxes` and `init_proposal_features` to the batch_size.\n\n    Args:\n        num_proposals (int): Number of init_proposals. Defaults to 100.\n        proposal_feature_channel (int): Channel number of\n            init_proposal_feature. Defaults to 256.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_proposals: int = 100,\n                 proposal_feature_channel: int = 256,\n                 init_cfg: OptConfigType = None,\n                 **kwargs) -> None:\n        # `**kwargs` is necessary to avoid some potential error.\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg)\n        self.num_proposals = num_proposals\n        self.proposal_feature_channel = proposal_feature_channel\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize a sparse set of proposal boxes and proposal features.\"\"\"\n        self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)\n        self.init_proposal_features = nn.Embedding(\n            self.num_proposals, self.proposal_feature_channel)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the init_proposal_bboxes as normalized.\n\n        [c_x, c_y, w, h], and we initialize it to the size of  the entire\n        image.\n        \"\"\"\n        super().init_weights()\n        nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)\n        nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)\n\n    def _decode_init_proposals(self, x: List[Tensor],\n                               batch_data_samples: SampleList) -> InstanceList:\n        \"\"\"Decode init_proposal_bboxes according to the size of images and\n        expand dimension of init_proposal_features to batch_size.\n\n        Args:\n            x (list[Tensor]): List of FPN features.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            List[:obj:`InstanceData`:] Detection results of each image.\n            Each item usually contains following keys.\n\n            - proposals: Decoded proposal bboxes,\n              has shape (num_proposals, 4).\n            - features: init_proposal_features, expanded proposal\n              features, has shape\n              (num_proposals, proposal_feature_channel).\n            - imgs_whwh: Tensor with shape\n              (num_proposals, 4), the dimension means\n              [img_width, img_height, img_width, img_height].\n        \"\"\"\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n\n        proposals = self.init_proposal_bboxes.weight.clone()\n        proposals = bbox_cxcywh_to_xyxy(proposals)\n        imgs_whwh = []\n        for meta in batch_img_metas:\n            h, w = meta['img_shape'][:2]\n            imgs_whwh.append(x[0].new_tensor([[w, h, w, h]]))\n        imgs_whwh = torch.cat(imgs_whwh, dim=0)\n        imgs_whwh = imgs_whwh[:, None, :]\n        proposals = proposals * imgs_whwh\n\n        rpn_results_list = []\n        for idx in range(len(batch_img_metas)):\n            rpn_results = InstanceData()\n            rpn_results.bboxes = proposals[idx]\n            rpn_results.imgs_whwh = imgs_whwh[idx].repeat(\n                self.num_proposals, 1)\n            rpn_results.features = self.init_proposal_features.weight.clone()\n            rpn_results_list.append(rpn_results)\n        return rpn_results_list\n\n    def loss(self, *args, **kwargs):\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\"\"\"\n        raise NotImplementedError(\n            'EmbeddingRPNHead does not have `loss`, please use '\n            '`predict` or `loss_and_predict` instead.')\n\n    def predict(self, x: List[Tensor], batch_data_samples: SampleList,\n                **kwargs) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network.\"\"\"\n        # `**kwargs` is necessary to avoid some potential error.\n        return self._decode_init_proposals(\n            x=x, batch_data_samples=batch_data_samples)\n\n    def loss_and_predict(self, x: List[Tensor], batch_data_samples: SampleList,\n                         **kwargs) -> tuple:\n        \"\"\"Perform forward propagation of the head, then calculate loss and\n        predictions from the features and data samples.\"\"\"\n        # `**kwargs` is necessary to avoid some potential error.\n        predictions = self._decode_init_proposals(\n            x=x, batch_data_samples=batch_data_samples)\n\n        return dict(), predictions\n"
  },
  {
    "path": "mmdet/models/dense_heads/fcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Scale\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig,\n                         OptInstanceList, RangeType, reduce_mean)\nfrom ..utils import multi_apply\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1e8\n\n\n@MODELS.register_module()\nclass FCOSHead(AnchorFreeHead):\n    \"\"\"Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.\n\n    The FCOS head does not use anchor boxes. Instead bounding boxes are\n    predicted at each pixel and a centerness measure is used to suppress\n    low-quality predictions.\n    Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training\n    tricks used in official repo, which will bring remarkable mAP gains\n    of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for\n    more detail.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points\n            in multiple feature levels. Defaults to (4, 8, 16, 32, 64).\n        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling.\n            Defaults to False.\n        center_sample_radius (float): Radius of center sampling.\n            Defaults to 1.5.\n        norm_on_bbox (bool): If true, normalize the regression targets with\n            FPN strides. Defaults to False.\n        centerness_on_reg (bool): If true, position centerness on the\n            regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.\n            Defaults to False.\n        conv_bias (bool or str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Defaults to \"auto\".\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.\n        loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness\n            loss.\n        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and\n            config norm layer.  
Defaults to\n            ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n\n    Example:\n        >>> self = FCOSHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred, centerness = self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"  # noqa: E501\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256),\n                                              (256, 512), (512, INF)),\n                 center_sampling: bool = False,\n                 center_sample_radius: float = 1.5,\n                 norm_on_bbox: bool = False,\n                 centerness_on_reg: bool = False,\n                 loss_cls: ConfigType = dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0),\n                 loss_centerness: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        self.regress_ranges = regress_ranges\n        self.center_sampling = center_sampling\n        self.center_sample_radius = center_sample_radius\n        self.norm_on_bbox = norm_on_bbox\n        self.centerness_on_reg = centerness_on_reg\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            norm_cfg=norm_cfg,\n            init_cfg=init_cfg,\n            **kwargs)\n        self.loss_centerness = MODELS.build(loss_centerness)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        super()._init_layers()\n        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n    def forward(\n            self, x: Tuple[Tensor]\n    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of each level outputs.\n\n            - cls_scores (list[Tensor]): Box scores for each scale level, \\\n            each is a 4D-tensor, the channel number is \\\n            num_points * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for each \\\n            scale level, each is a 4D-tensor, the channel number is \\\n            num_points * 4.\n            - centernesses (list[Tensor]): 
centerness for each scale level, \\\n            each is a 4D-tensor, the channel number is num_points * 1.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales, self.strides)\n\n    def forward_single(self, x: Tensor, scale: Scale,\n                       stride: int) -> Tuple[Tensor, Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps, only\n                used to normalize the bbox prediction when self.norm_on_bbox\n                is True.\n\n        Returns:\n            tuple: scores for each class, bbox predictions and centerness\n            predictions of input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)\n        if self.centerness_on_reg:\n            centerness = self.conv_centerness(reg_feat)\n        else:\n            centerness = self.conv_centerness(cls_feat)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        if self.norm_on_bbox:\n            # bbox_pred needed for gradient computation has been modified\n            # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n            bbox_pred = bbox_pred.clamp(min=0)\n            if not self.training:\n                bbox_pred *= stride\n        else:\n            bbox_pred = bbox_pred.exp()\n        return cls_score, bbox_pred, centerness\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        centernesses: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            centernesses (list[Tensor]): centerness for each scale level, each\n                is a 4D-tensor, the channel number is num_points * 1.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        labels, bbox_targets = self.get_targets(all_level_points,\n                                                batch_gt_instances)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and centerness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_centerness = [\n            centerness.permute(0, 2, 3, 1).reshape(-1)\n            for centerness in centernesses\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_centerness = torch.cat(flatten_centerness)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)\n        num_pos = torch.tensor(\n            len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)\n        num_pos = max(reduce_mean(num_pos), 1.0)\n        loss_cls = self.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_centerness = flatten_centerness[pos_inds]\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_centerness_targets = self.centerness_target(pos_bbox_targets)\n        # centerness weighted iou loss\n        centerness_denorm = max(\n            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)\n\n        if len(pos_inds) > 0:\n            pos_points = flatten_points[pos_inds]\n            pos_decoded_bbox_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_preds)\n            pos_decoded_target_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_targets)\n            loss_bbox = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds,\n                weight=pos_centerness_targets,\n                avg_factor=centerness_denorm)\n            loss_centerness = self.loss_centerness(\n                pos_centerness, pos_centerness_targets, avg_factor=num_pos)\n        else:\n            loss_bbox = pos_bbox_preds.sum()\n            loss_centerness = pos_centerness.sum()\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            loss_centerness=loss_centerness)\n\n    def get_targets(\n            self, points: List[Tensor], batch_gt_instances: 
InstanceList\n    ) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Compute regression, classification and centerness targets for points\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple: Targets of each level.\n\n            - concat_lvl_labels (list[Tensor]): Labels of each level.\n            - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \\\n            level.\n        \"\"\"\n        assert len(points) == len(self.regress_ranges)\n        num_levels = len(points)\n        # expand regress ranges to align with points\n        expanded_regress_ranges = [\n            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n                points[i]) for i in range(num_levels)\n        ]\n        # concat all levels points and regress ranges\n        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n        concat_points = torch.cat(points, dim=0)\n\n        # the number of points per img, per lvl\n        num_points = [center.size(0) for center in points]\n\n        # get labels and bbox_targets of each image\n        labels_list, bbox_targets_list = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            points=concat_points,\n            regress_ranges=concat_regress_ranges,\n            num_points_per_lvl=num_points)\n\n        # split to per img, per level\n        labels_list = [labels.split(num_points, 0) for labels in labels_list]\n        bbox_targets_list = [\n            bbox_targets.split(num_points, 0)\n            for bbox_targets in bbox_targets_list\n        ]\n\n        # concat per level image\n        concat_lvl_labels = []\n        concat_lvl_bbox_targets = []\n        for i in range(num_levels):\n            concat_lvl_labels.append(\n                torch.cat([labels[i] for labels in labels_list]))\n            bbox_targets = torch.cat(\n                [bbox_targets[i] for bbox_targets in bbox_targets_list])\n            if self.norm_on_bbox:\n                bbox_targets = bbox_targets / self.strides[i]\n            concat_lvl_bbox_targets.append(bbox_targets)\n        return concat_lvl_labels, concat_lvl_bbox_targets\n\n    def _get_targets_single(\n            self, gt_instances: InstanceData, points: Tensor,\n            regress_ranges: Tensor,\n            num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute regression and classification targets for a single image.\"\"\"\n        num_points = points.size(0)\n        num_gts = len(gt_instances)\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n\n        if num_gts == 0:\n            return gt_labels.new_full((num_points,), self.num_classes), \\\n                   gt_bboxes.new_zeros((num_points, 4))\n\n        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n            gt_bboxes[:, 3] - gt_bboxes[:, 1])\n        # TODO: figure out why these two are different\n        # areas = areas[None].expand(num_points, num_gts)\n        areas = areas[None].repeat(num_points, 1)\n        regress_ranges = regress_ranges[:, None, :].expand(\n            num_points, num_gts, 2)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        xs, ys = 
points[:, 0], points[:, 1]\n        xs = xs[:, None].expand(num_points, num_gts)\n        ys = ys[:, None].expand(num_points, num_gts)\n\n        left = xs - gt_bboxes[..., 0]\n        right = gt_bboxes[..., 2] - xs\n        top = ys - gt_bboxes[..., 1]\n        bottom = gt_bboxes[..., 3] - ys\n        bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n        if self.center_sampling:\n            # condition1: inside a `center bbox`\n            radius = self.center_sample_radius\n            center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2\n            center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2\n            center_gts = torch.zeros_like(gt_bboxes)\n            stride = center_xs.new_zeros(center_xs.shape)\n\n            # project the points on current lvl back to the `original` sizes\n            lvl_begin = 0\n            for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):\n                lvl_end = lvl_begin + num_points_lvl\n                stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius\n                lvl_begin = lvl_end\n\n            x_mins = center_xs - stride\n            y_mins = center_ys - stride\n            x_maxs = center_xs + stride\n            y_maxs = center_ys + stride\n            center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],\n                                             x_mins, gt_bboxes[..., 0])\n            center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],\n                                             y_mins, gt_bboxes[..., 1])\n            center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],\n                                             gt_bboxes[..., 2], x_maxs)\n            center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],\n                                             gt_bboxes[..., 3], y_maxs)\n\n            cb_dist_left = xs - center_gts[..., 0]\n            cb_dist_right = center_gts[..., 2] - xs\n            cb_dist_top = ys - center_gts[..., 1]\n            cb_dist_bottom = center_gts[..., 3] - ys\n            center_bbox = torch.stack(\n                (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)\n            inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0\n        else:\n            # condition1: inside a gt bbox\n            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n\n        # condition2: limit the regression range for each location\n        max_regress_distance = bbox_targets.max(-1)[0]\n        inside_regress_range = (\n            (max_regress_distance >= regress_ranges[..., 0])\n            & (max_regress_distance <= regress_ranges[..., 1]))\n\n        # if there are still more than one objects for a location,\n        # we choose the one with minimal area\n        areas[inside_gt_bbox_mask == 0] = INF\n        areas[inside_regress_range == 0] = INF\n        min_area, min_area_inds = areas.min(dim=1)\n\n        labels = gt_labels[min_area_inds]\n        labels[min_area == INF] = self.num_classes  # set as BG\n        bbox_targets = bbox_targets[range(num_points), min_area_inds]\n\n        return labels, bbox_targets\n\n    def centerness_target(self, pos_bbox_targets: Tensor) -> Tensor:\n        \"\"\"Compute centerness targets.\n\n        Args:\n            pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape\n                (num_pos, 4)\n\n        Returns:\n            Tensor: Centerness target.\n        \"\"\"\n        # only calculate pos centerness targets, otherwise there may be nan\n        left_right 
= pos_bbox_targets[:, [0, 2]]\n        top_bottom = pos_bbox_targets[:, [1, 3]]\n        if len(left_right) == 0:\n            centerness_targets = left_right[..., 0]\n        else:\n            centerness_targets = (\n                left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (\n                    top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])\n        return torch.sqrt(centerness_targets)\n"
  },
  {
    "path": "mmdet/models/dense_heads/fovea_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig\nfrom ..utils import filter_scores_and_topk, multi_apply\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1e8\n\n\nclass FeatureAlign(BaseModule):\n    \"\"\"Feature Align Module.\n\n    Feature Align Module is implemented based on DCN v1.\n    It uses anchor shape prediction rather than feature map to\n    predict offsets of deform conv layer.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        out_channels (int): Number of channels in the output feature map.\n        kernel_size (int): Size of the convolution kernel.\n        deform_groups (int): Group number of DCN in\n            FeatureAdaption module.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int = 3,\n        deform_groups: int = 4,\n        init_cfg: OptMultiConfig = dict(\n            type='Normal',\n            layer='Conv2d',\n            std=0.1,\n            override=dict(type='Normal', name='conv_adaption', std=0.01))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        offset_channels = kernel_size * kernel_size * 2\n        self.conv_offset = nn.Conv2d(\n            4, deform_groups * offset_channels, 1, bias=False)\n        self.conv_adaption = DeformConv2d(\n            in_channels,\n            out_channels,\n            kernel_size=kernel_size,\n            padding=(kernel_size - 1) // 2,\n            deform_groups=deform_groups)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x: Tensor, shape: Tensor) -> Tensor:\n        \"\"\"Forward function of feature align module.\n\n        Args:\n            x (Tensor): Features from the upstream network.\n            shape (Tensor): Exponential of bbox predictions.\n\n        Returns:\n            x (Tensor): The aligned features.\n        \"\"\"\n        offset = self.conv_offset(shape)\n        x = self.relu(self.conv_adaption(x, offset))\n        return x\n\n\n@MODELS.register_module()\nclass FoveaHead(AnchorFreeHead):\n    \"\"\"Detection Head of `FoveaBox: Beyond Anchor-based Object Detector.\n\n    <https://arxiv.org/abs/1904.03797>`_.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        base_edge_list (list[int]): List of edges.\n        scale_ranges (list[tuple]): Range of scales.\n        sigma (float): Hyperparameter of ``FoveaHead``.\n        with_deform (bool): Whether to use deform conv.\n        deform_groups (int): Deformable conv group size.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: 
int,\n                 base_edge_list: List[int] = (16, 32, 64, 128, 256),\n                 scale_ranges: List[tuple] = ((8, 32), (16, 64), (32, 128),\n                                              (64, 256), (128, 512)),\n                 sigma: float = 0.4,\n                 with_deform: bool = False,\n                 deform_groups: int = 4,\n                 init_cfg: OptMultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        self.base_edge_list = base_edge_list\n        self.scale_ranges = scale_ranges\n        self.sigma = sigma\n        self.with_deform = with_deform\n        self.deform_groups = deform_groups\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        # box branch\n        super()._init_reg_convs()\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n        # cls branch\n        if not self.with_deform:\n            super()._init_cls_convs()\n            self.conv_cls = nn.Conv2d(\n                self.feat_channels, self.cls_out_channels, 3, padding=1)\n        else:\n            self.cls_convs = nn.ModuleList()\n            self.cls_convs.append(\n                ConvModule(\n                    self.feat_channels, (self.feat_channels * 4),\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n            self.cls_convs.append(\n                ConvModule((self.feat_channels * 4), (self.feat_channels * 4),\n                           1,\n                           stride=1,\n                           padding=0,\n                           conv_cfg=self.conv_cfg,\n                           norm_cfg=self.norm_cfg,\n                           bias=self.norm_cfg is None))\n            self.feature_adaption = FeatureAlign(\n                self.feat_channels,\n                self.feat_channels,\n                kernel_size=3,\n                deform_groups=self.deform_groups)\n            self.conv_cls = nn.Conv2d(\n                int(self.feat_channels * 4),\n                self.cls_out_channels,\n                3,\n                padding=1)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n\n        Returns:\n            tuple: scores for each class and bbox predictions of input\n            feature maps.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n        bbox_pred = self.conv_reg(reg_feat)\n        if self.with_deform:\n            cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n        cls_score = self.conv_cls(cls_feat)\n        return cls_score, bbox_pred\n\n    def loss_by_feat(\n        self,\n   
     cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_priors * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_priors * 4.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        num_imgs = cls_scores[0].size(0)\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_labels, flatten_bbox_targets = self.get_targets(\n            batch_gt_instances, featmap_sizes, priors)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < self.num_classes)).nonzero().view(-1)\n        num_pos = len(pos_inds)\n\n        loss_cls = self.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)\n        if num_pos > 0:\n            pos_bbox_preds = flatten_bbox_preds[pos_inds]\n            pos_bbox_targets = flatten_bbox_targets[pos_inds]\n            pos_weights = pos_bbox_targets.new_ones(pos_bbox_targets.size())\n            loss_bbox = self.loss_bbox(\n                pos_bbox_preds,\n                pos_bbox_targets,\n                pos_weights,\n                avg_factor=num_pos)\n        else:\n            loss_bbox = torch.tensor(\n                0,\n                dtype=flatten_bbox_preds.dtype,\n                device=flatten_bbox_preds.device)\n        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)\n\n    def get_targets(\n            self, batch_gt_instances: InstanceList, featmap_sizes: List[tuple],\n            priors_list: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Compute regression and classification for priors in multiple images.\n\n        Args:\n            batch_gt_instances 
(list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            featmap_sizes (list[tuple]): Size tuple of feature maps.\n            priors_list (list[Tensor]): Priors list of each fpn level, each has\n                shape (num_priors, 2).\n\n        Returns:\n            tuple: Targets of each level.\n\n            - flatten_labels (list[Tensor]): Labels of each level.\n            - flatten_bbox_targets (list[Tensor]): BBox targets of each\n              level.\n        \"\"\"\n        label_list, bbox_target_list = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            featmap_size_list=featmap_sizes,\n            priors_list=priors_list)\n        flatten_labels = [\n            torch.cat([\n                labels_level_img.flatten() for labels_level_img in labels_level\n            ]) for labels_level in zip(*label_list)\n        ]\n        flatten_bbox_targets = [\n            torch.cat([\n                bbox_targets_level_img.reshape(-1, 4)\n                for bbox_targets_level_img in bbox_targets_level\n            ]) for bbox_targets_level in zip(*bbox_target_list)\n        ]\n        flatten_labels = torch.cat(flatten_labels)\n        flatten_bbox_targets = torch.cat(flatten_bbox_targets)\n        return flatten_labels, flatten_bbox_targets\n\n    def _get_targets_single(self,\n                            gt_instances: InstanceData,\n                            featmap_size_list: List[tuple] = None,\n                            priors_list: List[Tensor] = None) -> tuple:\n        \"\"\"Compute regression and classification targets for a single image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            featmap_size_list (list[tuple]): Size tuple of feature maps.\n            priors_list (list[Tensor]): Priors of each fpn level, each has\n                shape (num_priors, 2).\n\n        Returns:\n            tuple:\n\n            - label_list (list[Tensor]): Labels of all anchors in the image.\n            - box_target_list (list[Tensor]): BBox targets of all anchors in\n              the image.\n        \"\"\"\n        gt_bboxes_raw = gt_instances.bboxes\n        gt_labels_raw = gt_instances.labels\n        gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *\n                              (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))\n        label_list = []\n        bbox_target_list = []\n        # for each pyramid, find the cls and box target\n        for base_len, (lower_bound, upper_bound), stride, featmap_size, \\\n            priors in zip(self.base_edge_list, self.scale_ranges,\n                          self.strides, featmap_size_list, priors_list):\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            priors = priors.view(*featmap_size, 2)\n            x, y = priors[..., 0], priors[..., 1]\n            labels = gt_labels_raw.new_full(featmap_size, self.num_classes)\n            bbox_targets = gt_bboxes_raw.new_ones(featmap_size[0],\n                                                  featmap_size[1], 4)\n            # scale assignment\n            hit_indices = ((gt_areas >= lower_bound) &\n                           (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(hit_indices) == 0:\n                label_list.append(labels)\n                bbox_target_list.append(torch.log(bbox_targets))\n                continue\n            _, hit_index_order = torch.sort(-gt_areas[hit_indices])\n            hit_indices = hit_indices[hit_index_order]\n            gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride\n            gt_labels = gt_labels_raw[hit_indices]\n            half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])\n            half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])\n            # valid fovea area: left, right, top, down\n            pos_left = torch.ceil(\n                gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \\\n                clamp(0, featmap_size[1] - 1)\n            pos_right = torch.floor(\n                gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \\\n                clamp(0, featmap_size[1] - 1)\n            pos_top = torch.ceil(\n                gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \\\n                clamp(0, featmap_size[0] - 1)\n            pos_down = torch.floor(\n                gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). 
\\\n                clamp(0, featmap_size[0] - 1)\n            for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \\\n                    zip(pos_left, pos_top, pos_right, pos_down, gt_labels,\n                        gt_bboxes_raw[hit_indices, :]):\n                labels[py1:py2 + 1, px1:px2 + 1] = label\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \\\n                    (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \\\n                    (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \\\n                    (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \\\n                    (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len\n            bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)\n            label_list.append(labels)\n            bbox_target_list.append(torch.log(bbox_targets))\n        return label_list, bbox_target_list\n\n    # Same as base_dense_head/_predict_by_feat_single except self._bbox_decode\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: Optional[ConfigDict] = None,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 2).\n            img_meta (dict): Image meta info.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert 
len(cls_score_list) == len(bbox_pred_list)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list, self.strides,\n                              self.base_edge_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def _bbox_decode(self, priors: Tensor, bbox_pred: Tensor, base_len: int,\n                     max_shape: Tuple[int, int]) -> Tensor:\n        \"\"\"Function to decode bbox.\n\n        Args:\n            priors (Tensor): Center priors of an image, has shape\n                (num_instances, 2).\n            bbox_pred (Tensor): Box energies / deltas for all instances,\n                has shape (num_instances, 4).\n            base_len (int): The base length.\n            max_shape (tuple[int, int]): The max shape (height, width) used to\n                clamp the decoded bboxes.\n\n        Returns:\n            Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has\n            shape (num_instances, 4).\n        \"\"\"\n        bbox_pred = bbox_pred.exp()\n\n        y = priors[:, 1]\n        x = priors[:, 0]\n        x1 = (x - base_len * bbox_pred[:, 0]). \\\n            clamp(min=0, max=max_shape[1] - 1)\n        y1 = (y - base_len * bbox_pred[:, 1]). \\\n            clamp(min=0, max=max_shape[0] - 1)\n        x2 = (x + base_len * bbox_pred[:, 2]). \\\n            clamp(min=0, max=max_shape[1] - 1)\n        y2 = (y + base_len * bbox_pred[:, 3]). \\\n            clamp(min=0, max=max_shape[0] - 1)\n        decoded_bboxes = torch.stack([x1, y1, x2, y2], -1)\n        return decoded_bboxes\n"
  },
  {
    "path": "mmdet/models/dense_heads/free_anchor_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import InstanceList, OptConfigType, OptInstanceList\nfrom ..utils import multi_apply\nfrom .retina_head import RetinaHead\n\nEPS = 1e-12\n\n\n@MODELS.register_module()\nclass FreeAnchorRetinaHead(RetinaHead):\n    \"\"\"FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Defaults to 4.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to\n            construct and config conv layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to\n            construct and config norm layer. Defaults to None.\n        pre_anchor_topk (int): Number of boxes to be taken in each bag.\n            Defaults to 50.\n        bbox_thr (float): The threshold of the saturated linear function.\n            It is usually the same as the IoU threshold used in NMS.\n            Defaults to 0.6.\n        gamma (float): Gamma parameter in focal loss. Defaults to 2.0.\n        alpha (float): Alpha parameter in focal loss. Defaults to 0.5.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 pre_anchor_topk: int = 50,\n                 bbox_thr: float = 0.6,\n                 gamma: float = 2.0,\n                 alpha: float = 0.5,\n                 **kwargs) -> None:\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            stacked_convs=stacked_convs,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            **kwargs)\n\n        self.pre_anchor_topk = pre_anchor_topk\n        self.bbox_thr = bbox_thr\n        self.gamma = gamma\n        self.alpha = alpha\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, _ = self.get_anchors(\n            featmap_sizes=featmap_sizes,\n            batch_img_metas=batch_img_metas,\n            device=device)\n        concat_anchor_list = [torch.cat(anchor) for anchor in anchor_list]\n\n        # concatenate each level\n        cls_scores = [\n            cls.permute(0, 2, 3,\n                        1).reshape(cls.size(0), -1, self.cls_out_channels)\n            for cls in cls_scores\n        ]\n        bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        cls_scores = torch.cat(cls_scores, dim=1)\n        cls_probs = torch.sigmoid(cls_scores)\n        bbox_preds = torch.cat(bbox_preds, dim=1)\n\n        box_probs, positive_losses, num_pos_list = multi_apply(\n            self.positive_loss_single, cls_probs, bbox_preds,\n            concat_anchor_list, batch_gt_instances)\n\n        num_pos = sum(num_pos_list)\n        positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)\n\n        # box_prob: P{a_{j} \\in A_{+}}\n        box_probs = torch.stack(box_probs, dim=0)\n\n        # negative_loss:\n        # \\sum_{j}{ FL((1 - P{a_{j} \\in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||\n        negative_loss = self.negative_bag_loss(cls_probs, box_probs).sum() / \\\n            max(1, num_pos * self.pre_anchor_topk)\n\n        # avoid the absence of gradients in regression subnet\n        # when no ground-truth in a batch\n        if num_pos == 0:\n            positive_loss = bbox_preds.sum() * 0\n\n        losses = {\n            'positive_bag_loss': positive_loss,\n            'negative_bag_loss': negative_loss\n        }\n        return losses\n\n    def positive_loss_single(self, cls_prob: Tensor, bbox_pred: Tensor,\n                             flat_anchors: Tensor,\n                             gt_instances: InstanceData) -> tuple:\n        \"\"\"Compute positive loss.\n\n        Args:\n            cls_prob (Tensor): Classification probability of shape\n                (num_anchors, num_classes).\n            bbox_pred (Tensor): Box probability of shape (num_anchors, 4).\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors, 4)\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It should include ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple:\n\n                - box_prob (Tensor): Box probability of shape\n                  (num_anchors, num_classes).\n                - positive_loss (Tensor): Positive loss of shape (num_pos, ).\n                - num_pos (int): Number of positive samples.\n        \"\"\"\n\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        with torch.no_grad():\n            if len(gt_bboxes) == 0:\n                image_box_prob = torch.zeros(\n                    flat_anchors.size(0),\n                    self.cls_out_channels).type_as(bbox_pred)\n            else:\n                # box_localization: a_{j}^{loc}, shape: [j, 4]\n                pred_boxes = self.bbox_coder.decode(flat_anchors, bbox_pred)\n\n                # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]\n                object_box_iou = bbox_overlaps(gt_bboxes, pred_boxes)\n\n                # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]\n                t1 = self.bbox_thr\n                t2 = object_box_iou.max(\n                    dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)\n                object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp(\n                    min=0, max=1)\n\n                # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]\n                num_obj = gt_labels.size(0)\n                indices = torch.stack(\n                    [torch.arange(num_obj).type_as(gt_labels), gt_labels],\n                    dim=0)\n                object_cls_box_prob = torch.sparse_coo_tensor(\n                    indices, object_box_prob)\n\n                # image_box_iou: P{a_{j} \\in A_{+}}, shape: [c, j]\n                \"\"\"\n                from \"start\" to \"end\" implement:\n                image_box_iou = torch.sparse.max(object_cls_box_prob,\n                                                 dim=0).t()\n\n                \"\"\"\n                # start\n                box_cls_prob = torch.sparse.sum(\n                    object_cls_box_prob, dim=0).to_dense()\n\n                indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()\n                if indices.numel() == 0:\n                    image_box_prob = torch.zeros(\n                        flat_anchors.size(0),\n                        self.cls_out_channels).type_as(object_box_prob)\n                else:\n                    nonzero_box_prob = torch.where(\n                        (gt_labels.unsqueeze(dim=-1) == indices[0]),\n                        object_box_prob[:, indices[1]],\n                        torch.tensor(\n                            [0]).type_as(object_box_prob)).max(dim=0).values\n\n                    # unmap to shape [j, c]\n                    image_box_prob = torch.sparse_coo_tensor(\n                        indices.flip([0]),\n                        nonzero_box_prob,\n                        size=(flat_anchors.size(0),\n                              self.cls_out_channels)).to_dense()\n                # end\n            box_prob = image_box_prob\n\n        # construct bags for objects\n        match_quality_matrix = bbox_overlaps(gt_bboxes, flat_anchors)\n        _, matched = torch.topk(\n            match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False)\n        del match_quality_matrix\n\n        # matched_cls_prob: P_{ij}^{cls}\n        matched_cls_prob = torch.gather(\n            cls_prob[matched], 2,\n            gt_labels.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,\n        
                                    1)).squeeze(2)\n\n        # matched_box_prob: P_{ij}^{loc}\n        matched_anchors = flat_anchors[matched]\n        matched_object_targets = self.bbox_coder.encode(\n            matched_anchors,\n            gt_bboxes.unsqueeze(dim=1).expand_as(matched_anchors))\n        loss_bbox = self.loss_bbox(\n            bbox_pred[matched],\n            matched_object_targets,\n            reduction_override='none').sum(-1)\n        matched_box_prob = torch.exp(-loss_bbox)\n\n        # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}\n        num_pos = len(gt_bboxes)\n        positive_loss = self.positive_bag_loss(matched_cls_prob,\n                                               matched_box_prob)\n\n        return box_prob, positive_loss, num_pos\n\n    def positive_bag_loss(self, matched_cls_prob: Tensor,\n                          matched_box_prob: Tensor) -> Tensor:\n        \"\"\"Compute positive bag loss.\n\n        :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.\n\n        :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.\n\n        :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.\n\n        Args:\n            matched_cls_prob (Tensor): Classification probability of matched\n                samples in shape (num_gt, pre_anchor_topk).\n            matched_box_prob (Tensor): BBox probability of matched samples,\n                in shape (num_gt, pre_anchor_topk).\n\n        Returns:\n            Tensor: Positive bag loss in shape (num_gt,).\n        \"\"\"  # noqa: E501, W605\n        # bag_prob = Mean-max(matched_prob)\n        matched_prob = matched_cls_prob * matched_box_prob\n        weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)\n        weight /= weight.sum(dim=1).unsqueeze(dim=-1)\n        bag_prob = (weight * matched_prob).sum(dim=1)\n        # positive_bag_loss = -self.alpha * log(bag_prob)\n        return self.alpha * F.binary_cross_entropy(\n            bag_prob, torch.ones_like(bag_prob), reduction='none')\n\n    def negative_bag_loss(self, cls_prob: Tensor, box_prob: Tensor) -> Tensor:\n        \"\"\"Compute negative bag loss.\n\n        :math:`FL((1 - P_{a_{j} \\in A_{+}}) * (1 - P_{j}^{bg}))`.\n\n        :math:`P_{a_{j} \\in A_{+}}`: Box_probability of matched samples.\n\n        :math:`P_{j}^{bg}`: Classification probability of negative samples.\n\n        Args:\n            cls_prob (Tensor): Classification probability, in shape\n                (num_img, num_anchors, num_classes).\n            box_prob (Tensor): Box probability, in shape\n                (num_img, num_anchors, num_classes).\n\n        Returns:\n            Tensor: Negative bag loss in shape (num_img, num_anchors,\n            num_classes).\n        \"\"\"  # noqa: E501, W605\n        prob = cls_prob * (1 - box_prob)\n        # There are some cases when neg_prob = 0.\n        # This will cause the neg_prob.log() to be inf without clamp.\n        prob = prob.clamp(min=EPS, max=1 - EPS)\n        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(\n            prob, torch.zeros_like(prob), reduction='none')\n        return (1 - self.alpha) * negative_bag_loss\n"
  },
  {
    "path": "mmdet/models/dense_heads/fsaf_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig\nfrom ..losses.accuracy import accuracy\nfrom ..losses.utils import weight_reduce_loss\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..utils import images_to_levels, multi_apply, unmap\nfrom .retina_head import RetinaHead\n\n\n@MODELS.register_module()\nclass FSAFHead(RetinaHead):\n    \"\"\"Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_.\n\n    The head contains two subnetworks. The first classifies anchor boxes and\n    the second regresses deltas for the anchors (num_anchors is 1 for anchor-\n    free methods)\n\n    Args:\n        *args: Same as its base class in :class:`RetinaHead`\n        score_threshold (float, optional): The score_threshold to calculate\n            positive recall. If given, prediction scores lower than this value\n            is counted as incorrect prediction. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n        **kwargs: Same as its base class in :class:`RetinaHead`\n\n    Example:\n        >>> import torch\n        >>> self = FSAFHead(11, 7)\n        >>> x = torch.rand(1, 7, 32, 32)\n        >>> cls_score, bbox_pred = self.forward_single(x)\n        >>> # Each anchor predicts a score for each class except background\n        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors\n        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors\n        >>> assert cls_per_anchor == self.num_classes\n        >>> assert box_per_anchor == 4\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 score_threshold: Optional[float] = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        # The positive bias in self.retina_reg conv is to prevent predicted \\\n        #  bbox with 0 area\n        if init_cfg is None:\n            init_cfg = dict(\n                type='Normal',\n                layer='Conv2d',\n                std=0.01,\n                override=[\n                    dict(\n                        type='Normal',\n                        name='retina_cls',\n                        std=0.01,\n                        bias_prob=0.01),\n                    dict(\n                        type='Normal', name='retina_reg', std=0.01, bias=0.25)\n                ])\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        self.score_threshold = score_threshold\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward feature map of a single scale level.\n\n        Args:\n            x (Tensor): Feature map of a single scale level.\n\n        Returns:\n            tuple[Tensor, Tensor]:\n\n            - cls_score (Tensor): Box scores for each scale level Has \\\n            shape (N, num_points * num_classes, H, W).\n            - bbox_pred (Tensor): Box energies / deltas for each scale \\\n            level with shape (N, num_points * 4, H, W).\n        \"\"\"\n        cls_score, bbox_pred = super().forward_single(x)\n        # relu: TBLR encoder only accepts positive bbox_pred\n        return cls_score, self.relu(bbox_pred)\n\n    def _get_targets_single(self,\n           
                 flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Most of the code is the same as in the base class :obj:`AnchorHead`,\n        except that it also collects and returns the matched gt index in the\n        image (from 0 to num_gt-1). If the anchor bbox is not matched to any\n        gt, the corresponding value in pos_gt_inds is -1.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors, 4).\n            valid_flags (Tensor): Multi-level valid flags of the image,\n                which are concatenated into a single tensor of\n                shape (num_anchors, ).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should include ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.  Defaults to True.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. 
Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # Assign gt and sample anchors\n        anchors = flat_anchors[inside_flags.type(torch.bool), :]\n\n        pred_instances = InstanceData(priors=anchors)\n        assign_result = self.assigner.assign(pred_instances, gt_instances,\n                                             gt_instances_ignore)\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(\n            (num_valid_anchors, self.cls_out_channels), dtype=torch.float)\n        pos_gt_inds = anchors.new_full((num_valid_anchors, ),\n                                       -1,\n                                       dtype=torch.long)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n                # is applied directly on the decoded bounding boxes, both\n                # the predicted boxes and regression targets should be with\n                # absolute coordinate format.\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            # The assigned gt_index for each anchor. 
(0-based)\n            pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # shadowed_labels is a tensor composed of tuples\n        #  (anchor_inds, class_label) that indicate those anchors lying in the\n        #  outer region of a gt or overlapped by another gt with a smaller\n        #  area.\n        #\n        # Therefore, only the shadowed labels are ignored for loss calculation.\n        # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`\n        shadowed_labels = assign_result.get_extra_property('shadowed_labels')\n        if shadowed_labels is not None and shadowed_labels.numel():\n            if len(shadowed_labels.shape) == 2:\n                idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]\n                assert (labels[idx_] != label_).all(), \\\n                    'One label cannot be both positive and ignored'\n                label_weights[idx_, label_] = 0\n            else:\n                label_weights[shadowed_labels] = 0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n            pos_gt_inds = unmap(\n                pos_gt_inds, num_total_anchors, inside_flags, fill=-1)\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds, sampling_result, pos_gt_inds)\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_points * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_points * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        for i in range(len(bbox_preds)):  # loop over fpn level\n            # avoid 0 area of the predicted bbox\n            bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)\n        # TODO: It may directly use the base-class loss function.\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n        batch_size = len(batch_img_metas)\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            return_sampling_results=True)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor, sampling_results_list,\n         pos_assigned_gt_inds_list) = cls_reg_targets\n\n        num_gts = np.array(list(map(len, batch_gt_instances)))\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            avg_factor=avg_factor)\n\n        # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned\n        # gt index of each anchor bbox in each fpn level.\n        cum_num_gts = list(np.cumsum(num_gts))  # length of batch_size\n        for i, assign in enumerate(pos_assigned_gt_inds_list):\n            # loop over fpn levels\n            for j in range(1, batch_size):\n                # loop over batch size\n                # Convert gt indices in each img to those in the batch\n                assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])\n            pos_assigned_gt_inds_list[i] = assign.flatten()\n            labels_list[i] = labels_list[i].flatten()\n        num_gts = num_gts.sum()  # total number of gt in the batch\n        # The unique label index of each gt in the batch\n        label_sequence = torch.arange(num_gts, device=device)\n        # Collect the average loss of each gt in each level\n        with torch.no_grad():\n            loss_levels, = multi_apply(\n                self.collect_loss_level_single,\n                losses_cls,\n                losses_bbox,\n                pos_assigned_gt_inds_list,\n                labels_seq=label_sequence)\n            # Shape: (fpn_levels, num_gts). 
Loss of each gt at each fpn level\n            loss_levels = torch.stack(loss_levels, dim=0)\n            # Locate the best fpn level for loss back-propagation\n            if loss_levels.numel() == 0:  # zero gt\n                argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)\n            else:\n                _, argmin = loss_levels.min(dim=0)\n\n        # Reweight the loss of each (anchor, label) pair, so that only those\n        #  at the best gt level are back-propagated.\n        losses_cls, losses_bbox, pos_inds = multi_apply(\n            self.reweight_loss_single,\n            losses_cls,\n            losses_bbox,\n            pos_assigned_gt_inds_list,\n            labels_list,\n            list(range(len(losses_cls))),\n            min_levels=argmin)\n        num_pos = torch.cat(pos_inds, 0).sum().float()\n        pos_recall = self.calculate_pos_recall(cls_scores, labels_list,\n                                               pos_inds)\n\n        if num_pos == 0:  # No gt\n            num_total_neg = sum(\n                [results.num_neg for results in sampling_results_list])\n            avg_factor = num_pos + num_total_neg\n        else:\n            avg_factor = num_pos\n        for i in range(len(losses_cls)):\n            losses_cls[i] /= avg_factor\n            losses_bbox[i] /= avg_factor\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            num_pos=num_pos / batch_size,\n            pos_recall=pos_recall)\n\n    def calculate_pos_recall(self, cls_scores: List[Tensor],\n                             labels_list: List[Tensor],\n                             pos_inds: List[Tensor]) -> Tensor:\n        \"\"\"Calculate positive recall with score threshold.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores at all fpn levels.\n                Each tensor is in shape (N, num_classes * num_anchors, H, W)\n            labels_list (list[Tensor]): The label that each anchor is assigned\n                to. Shape (N * H * W * num_anchors, )\n            pos_inds (list[Tensor]): List of bool tensors indicating whether\n                the anchor is assigned to a positive label.\n                Shape (N * H * W * num_anchors, )\n\n        Returns:\n            Tensor: A single float number indicating the positive recall.\n        \"\"\"\n        with torch.no_grad():\n            num_class = self.num_classes\n            scores = [\n                cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]\n                for cls, pos in zip(cls_scores, pos_inds)\n            ]\n            labels = [\n                label.reshape(-1)[pos]\n                for label, pos in zip(labels_list, pos_inds)\n            ]\n            scores = torch.cat(scores, dim=0)\n            labels = torch.cat(labels, dim=0)\n            if self.use_sigmoid_cls:\n                scores = scores.sigmoid()\n            else:\n                scores = scores.softmax(dim=1)\n\n            return accuracy(scores, labels, thresh=self.score_threshold)\n\n    def collect_loss_level_single(self, cls_loss: Tensor, reg_loss: Tensor,\n                                  assigned_gt_inds: Tensor,\n                                  labels_seq: Tensor) -> Tensor:\n        \"\"\"Get the average loss in each FPN level w.r.t. 
each gt label.\n\n        Args:\n            cls_loss (Tensor): Classification loss of each feature map pixel,\n              shape (num_anchor, num_class)\n            reg_loss (Tensor): Regression loss of each feature map pixel,\n              shape (num_anchor, 4)\n            assigned_gt_inds (Tensor): It indicates which gt the prior is\n              assigned to (0-based, -1: no assignment). shape (num_anchor),\n            labels_seq: The rank of labels. shape (num_gt)\n\n        Returns:\n            Tensor: shape (num_gt), average loss of each gt in this level\n        \"\"\"\n        if len(reg_loss.shape) == 2:  # iou loss has shape (num_prior, 4)\n            reg_loss = reg_loss.sum(dim=-1)  # sum loss in tblr dims\n        if len(cls_loss.shape) == 2:\n            cls_loss = cls_loss.sum(dim=-1)  # sum loss in class dims\n        loss = cls_loss + reg_loss\n        assert loss.size(0) == assigned_gt_inds.size(0)\n        # Default loss value is 1e6 for a layer where no anchor is positive\n        #  to ensure it will not be chosen to back-propagate gradient\n        losses_ = loss.new_full(labels_seq.shape, 1e6)\n        for i, l in enumerate(labels_seq):\n            match = assigned_gt_inds == l\n            if match.any():\n                losses_[i] = loss[match].mean()\n        return losses_,\n\n    def reweight_loss_single(self, cls_loss: Tensor, reg_loss: Tensor,\n                             assigned_gt_inds: Tensor, labels: Tensor,\n                             level: int, min_levels: Tensor) -> tuple:\n        \"\"\"Reweight loss values at each level.\n\n        Reassign loss values at each level by masking those where the\n        pre-calculated loss is too large. Then return the reduced losses.\n\n        Args:\n            cls_loss (Tensor): Element-wise classification loss.\n              Shape: (num_anchors, num_classes)\n            reg_loss (Tensor): Element-wise regression loss.\n              Shape: (num_anchors, 4)\n            assigned_gt_inds (Tensor): The gt indices that each anchor bbox\n              is assigned to. -1 denotes a negative anchor, otherwise it is the\n              gt index (0-based). Shape: (num_anchors, ),\n            labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).\n            level (int): The current level index in the pyramid\n              (0-4 for RetinaNet)\n            min_levels (Tensor): The best-matching level for each gt.\n              Shape: (num_gts, ),\n\n        Returns:\n            tuple:\n\n            - cls_loss: Reduced corrected classification loss. Scalar.\n            - reg_loss: Reduced corrected regression loss. Scalar.\n            - pos_flags (Tensor): Corrected bool tensor indicating the \\\n            final positive anchors. 
Shape: (num_anchors, ).\n        \"\"\"\n        loc_weight = torch.ones_like(reg_loss)\n        cls_weight = torch.ones_like(cls_loss)\n        pos_flags = assigned_gt_inds >= 0  # positive pixel flag\n        pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()\n\n        if pos_flags.any():  # pos pixels exist\n            pos_assigned_gt_inds = assigned_gt_inds[pos_flags]\n            zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)\n            neg_indices = pos_indices[zeroing_indices]\n\n            if neg_indices.numel():\n                pos_flags[neg_indices] = 0\n                loc_weight[neg_indices] = 0\n                # Only the weight corresponding to the label is\n                #  zeroed out if not selected\n                zeroing_labels = labels[neg_indices]\n                assert (zeroing_labels >= 0).all()\n                cls_weight[neg_indices, zeroing_labels] = 0\n\n        # Weighted loss for both cls and reg loss\n        cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')\n        reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')\n\n        return cls_loss, reg_loss, pos_flags\n"
  },
  {
    "path": "mmdet/models/dense_heads/ga_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import MaskedConv2d\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .guided_anchor_head import FeatureAdaption, GuidedAnchorHead\n\n\n@MODELS.register_module()\nclass GARetinaHead(GuidedAnchorHead):\n    \"\"\"Guided-Anchor-based RetinaNet head.\"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        if init_cfg is None:\n            init_cfg = dict(\n                type='Normal',\n                layer='Conv2d',\n                std=0.01,\n                override=[\n                    dict(\n                        type='Normal',\n                        name='conv_loc',\n                        std=0.01,\n                        bias_prob=0.01),\n                    dict(\n                        type='Normal',\n                        name='retina_cls',\n                        std=0.01,\n                        bias_prob=0.01)\n                ])\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)\n        num_anchors = self.square_anchor_generator.num_base_priors[0]\n        self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1)\n        self.feature_adaption_cls = FeatureAdaption(\n            self.feat_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.feature_adaption_reg = FeatureAdaption(\n            self.feat_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.retina_cls = MaskedConv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.retina_reg = MaskedConv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n    def forward_single(self, x: Tensor) -> 
Tuple[Tensor]:\n        \"\"\"Forward feature map of a single scale level.\"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n\n        loc_pred = self.conv_loc(cls_feat)\n        shape_pred = self.conv_shape(reg_feat)\n\n        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)\n        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)\n\n        if not self.training:\n            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr\n        else:\n            mask = None\n        cls_score = self.retina_cls(cls_feat, mask)\n        bbox_pred = self.retina_reg(reg_feat, mask)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n"
  },
  {
    "path": "mmdet/models/dense_heads/ga_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.ops import nms\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList\nfrom .guided_anchor_head import GuidedAnchorHead\n\n\n@MODELS.register_module()\nclass GARPNHead(GuidedAnchorHead):\n    \"\"\"Guided-Anchor-based RPN head.\"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 num_classes: int = 1,\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_loc',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.rpn_conv = nn.Conv2d(\n            self.in_channels, self.feat_channels, 3, padding=1)\n        super(GARPNHead, self)._init_layers()\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward feature of a single scale level.\"\"\"\n\n        x = self.rpn_conv(x)\n        x = F.relu(x, inplace=True)\n        (cls_score, bbox_pred, shape_pred,\n         loc_pred) = super().forward_single(x)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            shape_preds: List[Tensor],\n            loc_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            shape_preds (list[Tensor]): shape predictions for each scale\n                level with shape (N, 1, H, W).\n            loc_preds (list[Tensor]): location predictions for each scale\n                level with shape (N, num_anchors * 2, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        losses = super().loss_by_feat(\n            cls_scores,\n            bbox_preds,\n            shape_preds,\n            loc_preds,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        return dict(\n            loss_rpn_cls=losses['loss_cls'],\n            loss_rpn_bbox=losses['loss_bbox'],\n            loss_anchor_shape=losses['loss_shape'],\n            loss_anchor_loc=losses['loss_loc'])\n\n    def _predict_by_feat_single(self,\n                                cls_scores: List[Tensor],\n                                bbox_preds: List[Tensor],\n                                mlvl_anchors: List[Tensor],\n                                mlvl_masks: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigType,\n                                rescale: bool = False) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            mlvl_anchors (list[Tensor]): Each element in the list is\n                the anchors of a single level in feature pyramid. 
It has\n                shape (num_priors, 4).\n            mlvl_masks (list[Tensor]): Each element in the list is location\n                masks of a single level.\n            img_meta (dict): Image meta info.\n            cfg (:obj:`ConfigDict` or dict): Test / postprocessing\n                configuration. If None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains the following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instances, )\n            - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4), the last\n              dimension 4 arranged as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only supports ' \\\n            'naive nms.'\n\n        mlvl_proposals = []\n        for idx in range(len(cls_scores)):\n            rpn_cls_score = cls_scores[idx]\n            rpn_bbox_pred = bbox_preds[idx]\n            anchors = mlvl_anchors[idx]\n            mask = mlvl_masks[idx]\n            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]\n            # if no location is kept, end.\n            if mask.sum() == 0:\n                continue\n            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)\n            if self.use_sigmoid_cls:\n                rpn_cls_score = rpn_cls_score.reshape(-1)\n                scores = rpn_cls_score.sigmoid()\n            else:\n                rpn_cls_score = rpn_cls_score.reshape(-1, 2)\n                # note that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = rpn_cls_score.softmax(dim=1)[:, :-1]\n            # filter scores, bbox_pred w.r.t. mask.\n            # anchors are filtered in get_anchors() beforehand.\n            scores = scores[mask]\n            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,\n                                                                   4)[mask, :]\n            if scores.dim() == 0:\n                rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)\n                anchors = anchors.unsqueeze(0)\n                scores = scores.unsqueeze(0)\n            # filter anchors, bbox_pred, scores w.r.t. scores\n            if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:\n                _, topk_inds = scores.topk(cfg.nms_pre)\n                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]\n                anchors = anchors[topk_inds, :]\n                scores = scores[topk_inds]\n            # get proposals w.r.t. 
anchors and rpn_bbox_pred\n            proposals = self.bbox_coder.decode(\n                anchors, rpn_bbox_pred, max_shape=img_meta['img_shape'])\n            # filter out too small bboxes\n            if cfg.min_bbox_size >= 0:\n                w = proposals[:, 2] - proposals[:, 0]\n                h = proposals[:, 3] - proposals[:, 1]\n                valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n                if not valid_mask.all():\n                    proposals = proposals[valid_mask]\n                    scores = scores[valid_mask]\n\n            # NMS in current level\n            proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)\n            proposals = proposals[:cfg.nms_post, :]\n            mlvl_proposals.append(proposals)\n        proposals = torch.cat(mlvl_proposals, 0)\n        if cfg.get('nms_across_levels', False):\n            # NMS across multi levels\n            proposals, _ = nms(proposals[:, :4], proposals[:, -1],\n                               cfg.nms.iou_threshold)\n            proposals = proposals[:cfg.max_per_img, :]\n        else:\n            scores = proposals[:, 4]\n            num = min(cfg.max_per_img, proposals.shape[0])\n            _, topk_inds = scores.topk(num)\n            proposals = proposals[topk_inds, :]\n\n        bboxes = proposals[:, :-1]\n        scores = proposals[:, -1]\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.scores = scores\n        results.labels = scores.new_zeros(scores.size(0), dtype=torch.long)\n        return results\n"
  },
  {
    "path": "mmdet/models/dense_heads/gfl_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,\n                     unmap)\nfrom .anchor_head import AnchorHead\n\n\nclass Integral(nn.Module):\n    \"\"\"A fixed layer for calculating integral result from distribution.\n\n    This layer calculates the target location by :math: ``sum{P(y_i) * y_i}``,\n    P(y_i) denotes the softmax vector that represents the discrete distribution\n    y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}\n\n    Args:\n        reg_max (int): The maximal value of the discrete set. Defaults to 16.\n            You may want to reset it according to your new dataset or related\n            settings.\n    \"\"\"\n\n    def __init__(self, reg_max: int = 16) -> None:\n        super().__init__()\n        self.reg_max = reg_max\n        self.register_buffer('project',\n                             torch.linspace(0, self.reg_max, self.reg_max + 1))\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward feature from the regression head to get integral result of\n        bounding box location.\n\n        Args:\n            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),\n                n is self.reg_max.\n\n        Returns:\n            x (Tensor): Integral result of box locations, i.e., distance\n                offsets from the box center in four directions, shape (N, 4).\n        \"\"\"\n        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)\n        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)\n        return x\n\n\n@MODELS.register_module()\nclass GFLHead(AnchorHead):\n    \"\"\"Generalized Focal Loss: Learning Qualified and Distributed Bounding\n    Boxes for Dense Object Detection.\n\n    GFL head structure is similar with ATSS, however GFL uses\n    1) joint representation for classification and localization quality, and\n    2) flexible General distribution for bounding box locations,\n    which are supervised by\n    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively\n\n    https://arxiv.org/abs/2006.04388\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Defaults to 4.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct\n            and config conv layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and\n            config norm layer. Default: dict(type='GN', num_groups=32,\n            requires_grad=True).\n        loss_qfl (:obj:`ConfigDict` or dict): Config of Quality Focal Loss\n            (QFL).\n        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. 
Defaults\n             to 'DistancePointBBoxCoder'.\n        reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}``\n            in QFL setting. Defaults to 16.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict.\n    Example:\n        >>> self = GFLHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_quality_score, bbox_pred = self.forward(feats)\n        >>> assert len(cls_quality_score) == len(self.scales)\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 loss_dfl: ConfigType = dict(\n                     type='DistributionFocalLoss', loss_weight=0.25),\n                 bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),\n                 reg_max: int = 16,\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='gfl_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.reg_max = reg_max\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            bbox_coder=bbox_coder,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            if self.train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n        self.integral = Integral(self.reg_max)\n        self.loss_dfl = MODELS.build(loss_dfl)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU()\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        assert self.num_anchors == 1, 'anchor free version'\n        self.gfl_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.gfl_reg = nn.Conv2d(\n            self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)\n        self.scales = 
nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n\n            - cls_scores (list[Tensor]): Classification and quality (IoU)\n              joint scores for all scale levels, each is a 4D-tensor,\n              the channel number is num_classes.\n            - bbox_preds (list[Tensor]): Box distribution logits for all\n              scale levels, each is a 4D-tensor, the channel number is\n              4*(n+1), n is max value of integral set.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales)\n\n    def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n\n            - cls_score (Tensor): Cls and quality joint scores for a single\n              scale level the channel number is num_classes.\n            - bbox_pred (Tensor): Box distribution logits for a single scale\n              level, the channel number is 4*(n+1), n is max value of\n              integral set.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.gfl_cls(cls_feat)\n        bbox_pred = scale(self.gfl_reg(reg_feat)).float()\n        return cls_score, bbox_pred\n\n    def anchor_center(self, anchors: Tensor) -> Tensor:\n        \"\"\"Get anchor centers from anchors.\n\n        Args:\n            anchors (Tensor): Anchor list with shape (N, 4), ``xyxy`` format.\n\n        Returns:\n            Tensor: Anchor centers with shape (N, 2), ``xy`` format.\n        \"\"\"\n        anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2\n        anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2\n        return torch.stack([anchors_cx, anchors_cy], dim=-1)\n\n    def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n                            bbox_pred: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            stride: Tuple[int], avg_factor: int) -> dict:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Cls and quality joint scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_pred (Tensor): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            
bbox_targets (Tensor): BBox regression targets of each anchor\n                with shape (N, num_total_anchors, 4).\n            stride (Tuple[int]): Stride in this scale level.\n            avg_factor (int): Average factor that is used to average\n                the loss. When using sampling method, avg_factor is usually\n                the sum of positive and negative priors. When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            tuple[Tensor]: Classification loss, bbox regression loss,\n                distribution focal loss and the summed ``weight_targets``\n                of this level.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        bbox_pred = bbox_pred.permute(0, 2, 3,\n                                      1).reshape(-1, 4 * (self.reg_max + 1))\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n        score = label_weights.new_zeros(labels.shape)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]\n\n            weight_targets = cls_score.detach().sigmoid()\n            weight_targets = weight_targets.max(dim=1)[0][pos_inds]\n            pos_bbox_pred_corners = self.integral(pos_bbox_pred)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchor_centers, pos_bbox_pred_corners)\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n            score[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\n            target_corners = self.bbox_coder.encode(pos_anchor_centers,\n                                                    pos_decode_bbox_targets,\n                                                    self.reg_max).reshape(-1)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=weight_targets,\n                avg_factor=1.0)\n\n            # dfl loss\n            loss_dfl = self.loss_dfl(\n                pred_corners,\n                target_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            loss_dfl = bbox_pred.sum() * 0\n            weight_targets = bbox_pred.new_tensor(0)\n\n        # cls (qfl) loss\n        loss_cls = self.loss_cls(\n            cls_score, (labels, score),\n            weight=label_weights,\n            avg_factor=avg_factor)\n\n        return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n          
  batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        losses_cls, losses_bbox, losses_dfl,\\\n            avg_factor = multi_apply(\n                self.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                self.prior_generator.strides,\n                avg_factor=avg_factor)\n\n        avg_factor = sum(avg_factor)\n        avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))\n        losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors 
* num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. GFL head does not need this value.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (:obj: `ConfigDict`): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n            is False and mlvl_score_factor is None, return mlvl_bboxes and\n            mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n            mlvl_score_factor. Usually with_nms is False is used for aug\n            test. If with_nms is True, then return the following format\n\n            - det_bboxes (Tensor): Predicted bboxes with shape\n              [num_bboxes, 5], where the first 4 columns are bounding\n              box positions (tl_x, tl_y, br_x, br_y) and the 5-th\n              column are scores between 0 and 1.\n            - det_labels (Tensor): Predicted labels of the corresponding\n              box with shape [num_bboxes].\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate(\n                zip(cls_score_list, bbox_pred_list,\n                    self.prior_generator.strides, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            assert stride[0] == stride[1]\n\n            bbox_pred = bbox_pred.permute(1, 2, 0)\n            bbox_pred = self.integral(bbox_pred) * stride[0]\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self.bbox_coder.decode(\n                self.anchor_center(priors), bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def get_targets(self,\n                    anchor_list: List[Tensor],\n                    valid_flag_list: List[Tensor],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs=True) -> tuple:\n        \"\"\"Get targets for GFL head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. Besides\n        returning the targets as the parent method does, it also returns the\n        anchors as the first element of the returned tuple.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             anchor_list,\n             valid_flag_list,\n             num_level_anchors_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs)\n        # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n        # When using sampling method, avg_factor is usually the sum of\n        # positive and negative priors. When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, avg_factor)\n\n    def _get_targets_single(self,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            num_level_anchors: List[int],\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors, 4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            num_level_anchors (list[int]): Number of anchors of each scale\n                level.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. 
Defaults to True.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n\n            - anchors (Tensor): All anchors in the image with shape (N, 4).\n            - labels (Tensor): Labels of all anchors in the image with\n              shape (N,).\n            - label_weights (Tensor): Label weights of all anchor in the\n              image with shape (N,).\n            - bbox_targets (Tensor): BBox targets of all anchors in the\n              image with shape (N, 4).\n            - bbox_weights (Tensor): BBox weights of all anchors in the\n              image with shape (N, 4).\n            - pos_inds (Tensor): Indices of positive anchor with shape\n              (num_pos,).\n            - neg_inds (Tensor): Indices of negative anchor with shape\n              (num_neg,).\n            - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            num_level_anchors, inside_flags)\n        pred_instances = InstanceData(priors=anchors)\n        assign_result = self.assigner.assign(\n            pred_instances=pred_instances,\n            num_level_priors=num_level_anchors_inside,\n            gt_instances=gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n\n        sampling_result = self.sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = 
unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds, sampling_result)\n\n    def get_num_level_anchors_inside(self, num_level_anchors: List[int],\n                                     inside_flags: Tensor) -> List[int]:\n        \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
  {
    "path": "mmdet/models/dense_heads/guided_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import DeformConv2d, MaskedConv2d\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList)\nfrom ..layers import multiclass_nms\nfrom ..task_modules.prior_generators import anchor_inside_flags, calc_region\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import images_to_levels, multi_apply, unmap\nfrom .anchor_head import AnchorHead\n\n\nclass FeatureAdaption(BaseModule):\n    \"\"\"Feature Adaption Module.\n\n    Feature Adaption Module is implemented based on DCN v1.\n    It uses anchor shape prediction rather than feature map to\n    predict offsets of deform conv layer.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        out_channels (int): Number of channels in the output feature map.\n        kernel_size (int): Deformable conv kernel size. Defaults to 3.\n        deform_groups (int): Deformable conv group size. Defaults to 4.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \\\n            list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int = 3,\n        deform_groups: int = 4,\n        init_cfg: MultiConfig = dict(\n            type='Normal',\n            layer='Conv2d',\n            std=0.1,\n            override=dict(type='Normal', name='conv_adaption', std=0.01))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        offset_channels = kernel_size * kernel_size * 2\n        self.conv_offset = nn.Conv2d(\n            2, deform_groups * offset_channels, 1, bias=False)\n        self.conv_adaption = DeformConv2d(\n            in_channels,\n            out_channels,\n            kernel_size=kernel_size,\n            padding=(kernel_size - 1) // 2,\n            deform_groups=deform_groups)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x: Tensor, shape: Tensor) -> Tensor:\n        offset = self.conv_offset(shape.detach())\n        x = self.relu(self.conv_adaption(x, offset))\n        return x\n\n\n@MODELS.register_module()\nclass GuidedAnchorHead(AnchorHead):\n    \"\"\"Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).\n\n    This GuidedAnchorHead will predict high-quality feature guided\n    anchors and locations where anchors will be kept in inference.\n    There are mainly 3 categories of bounding-boxes.\n\n    - Sampled 9 pairs for target assignment. (approxes)\n    - The square boxes where the predicted anchors are based on. (squares)\n    - Guided anchors.\n\n    Please refer to https://arxiv.org/abs/1901.03278 for more details.\n\n    Args:\n        num_classes (int): Number of classes.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. 
Defaults to 256.\n        approx_anchor_generator (:obj:`ConfigDict` or dict): Config dict\n            for approx generator.\n        square_anchor_generator (:obj:`ConfigDict` or dict): Config dict\n            for square generator.\n        anchor_coder (:obj:`ConfigDict` or dict): Config dict for anchor coder\n        bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Defaults to False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        deform_groups (int): Group number of DCN in FeatureAdaption module.\n            Defaults to 4.\n        loc_filter_thr (float): Threshold to filter out unconcerned regions.\n            Defaults to 0.01.\n        loss_loc (:obj:`ConfigDict` or dict): Config of location loss.\n        loss_shape (:obj:`ConfigDict` or dict): Config of anchor shape loss.\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of bbox regression loss.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \\\n            list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        feat_channels: int = 256,\n        approx_anchor_generator: ConfigType = dict(\n            type='AnchorGenerator',\n            octave_base_scale=8,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        square_anchor_generator: ConfigType = dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[8],\n            strides=[4, 8, 16, 32, 64]),\n        anchor_coder: ConfigType = dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        bbox_coder: ConfigType = dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        reg_decoded_bbox: bool = False,\n        deform_groups: int = 4,\n        loc_filter_thr: float = 0.01,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        loss_loc: ConfigType = dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_shape: ConfigType = dict(\n            type='BoundedIoULoss', beta=0.2, loss_weight=1.0),\n        loss_cls: ConfigType = dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox: ConfigType = dict(\n            type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n        init_cfg: MultiConfig = dict(\n            type='Normal',\n            layer='Conv2d',\n            std=0.01,\n            override=dict(\n                type='Normal', name='conv_loc', std=0.01, bias_prob=0.01))\n    ) -> None:\n        super(AnchorHead, self).__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.deform_groups = deform_groups\n        self.loc_filter_thr = loc_filter_thr\n\n        # build 
approx_anchor_generator and square_anchor_generator\n        assert (approx_anchor_generator['octave_base_scale'] ==\n                square_anchor_generator['scales'][0])\n        assert (approx_anchor_generator['strides'] ==\n                square_anchor_generator['strides'])\n        self.approx_anchor_generator = TASK_UTILS.build(\n            approx_anchor_generator)\n        self.square_anchor_generator = TASK_UTILS.build(\n            square_anchor_generator)\n        self.approxs_per_octave = self.approx_anchor_generator \\\n            .num_base_priors[0]\n\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        # one anchor per location\n        self.num_base_priors = self.square_anchor_generator.num_base_priors[0]\n\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = self.num_classes\n        else:\n            self.cls_out_channels = self.num_classes + 1\n\n        # build bbox_coder\n        self.anchor_coder = TASK_UTILS.build(anchor_coder)\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n\n        # build losses\n        self.loss_loc = MODELS.build(loss_loc)\n        self.loss_shape = MODELS.build(loss_shape)\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            # use PseudoSampler when no sampler in train_cfg\n            if train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler()\n\n            self.ga_assigner = TASK_UTILS.build(self.train_cfg['ga_assigner'])\n            if train_cfg.get('ga_sampler', None) is not None:\n                self.ga_sampler = TASK_UTILS.build(\n                    self.train_cfg['ga_sampler'],\n                    default_args=dict(context=self))\n            else:\n                self.ga_sampler = PseudoSampler()\n\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)\n        self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2,\n                                    1)\n        self.feature_adaption = FeatureAdaption(\n            self.in_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.conv_cls = MaskedConv2d(\n            self.feat_channels, self.num_base_priors * self.cls_out_channels,\n            1)\n        self.conv_reg = MaskedConv2d(self.feat_channels,\n                                     self.num_base_priors * 4, 1)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward feature of a single scale level.\"\"\"\n        loc_pred = self.conv_loc(x)\n        shape_pred = self.conv_shape(x)\n        x = self.feature_adaption(x, shape_pred)\n        # masked conv is only used during inference for speed-up\n        if not self.training:\n            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr\n        else:\n            mask = None\n        cls_score = 
self.conv_cls(x, mask)\n        bbox_pred = self.conv_reg(x, mask)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n\n    def forward(self, x: List[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\"\"\"\n        return multi_apply(self.forward_single, x)\n\n    def get_sampled_approxs(self,\n                            featmap_sizes: List[Tuple[int, int]],\n                            batch_img_metas: List[dict],\n                            device: str = 'cuda') -> tuple:\n        \"\"\"Get sampled approxs and inside flags according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            batch_img_metas (list[dict]): Image meta info.\n            device (str): device for returned tensors\n\n        Returns:\n            tuple: approxes of each image, inside flags of each image\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # approxes for one time\n        multi_level_approxs = self.approx_anchor_generator.grid_priors(\n            featmap_sizes, device=device)\n        approxs_list = [multi_level_approxs for _ in range(num_imgs)]\n\n        # for each image, we compute inside flags of multi level approxes\n        inside_flag_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_flags = []\n            multi_level_approxs = approxs_list[img_id]\n\n            # obtain valid flags for each approx first\n            multi_level_approx_flags = self.approx_anchor_generator \\\n                .valid_flags(featmap_sizes,\n                             img_meta['pad_shape'],\n                             device=device)\n\n            for i, flags in enumerate(multi_level_approx_flags):\n                approxs = multi_level_approxs[i]\n                inside_flags_list = []\n                for j in range(self.approxs_per_octave):\n                    split_valid_flags = flags[j::self.approxs_per_octave]\n                    split_approxs = approxs[j::self.approxs_per_octave, :]\n                    inside_flags = anchor_inside_flags(\n                        split_approxs, split_valid_flags,\n                        img_meta['img_shape'][:2],\n                        self.train_cfg['allowed_border'])\n                    inside_flags_list.append(inside_flags)\n                # inside_flag for a position is true if any anchor in this\n                # position is true\n                inside_flags = (\n                    torch.stack(inside_flags_list, 0).sum(dim=0) > 0)\n                multi_level_flags.append(inside_flags)\n            inside_flag_list.append(multi_level_flags)\n        return approxs_list, inside_flag_list\n\n    def get_anchors(self,\n                    featmap_sizes: List[Tuple[int, int]],\n                    shape_preds: List[Tensor],\n                    loc_preds: List[Tensor],\n                    batch_img_metas: List[dict],\n                    use_loc_filter: bool = False,\n                    device: str = 'cuda') -> tuple:\n        \"\"\"Get squares according to feature map sizes and guided anchors.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            shape_preds (list[tensor]): Multi-level shape predictions.\n            loc_preds (list[tensor]): Multi-level location predictions.\n            batch_img_metas (list[dict]): Image meta info.\n           
 use_loc_filter (bool): Use loc filter or not. Defaults to False\n            device (str): device for returned tensors.\n                Defaults to `cuda`.\n\n        Returns:\n            tuple: square approxs of each image, guided anchors of each image,\n            loc masks of each image.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        num_levels = len(featmap_sizes)\n\n        # since feature map sizes of all images are the same, we only compute\n        # squares for one time\n        multi_level_squares = self.square_anchor_generator.grid_priors(\n            featmap_sizes, device=device)\n        squares_list = [multi_level_squares for _ in range(num_imgs)]\n\n        # for each image, we compute multi level guided anchors\n        guided_anchors_list = []\n        loc_mask_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_guided_anchors = []\n            multi_level_loc_mask = []\n            for i in range(num_levels):\n                squares = squares_list[img_id][i]\n                shape_pred = shape_preds[i][img_id]\n                loc_pred = loc_preds[i][img_id]\n                guided_anchors, loc_mask = self._get_guided_anchors_single(\n                    squares,\n                    shape_pred,\n                    loc_pred,\n                    use_loc_filter=use_loc_filter)\n                multi_level_guided_anchors.append(guided_anchors)\n                multi_level_loc_mask.append(loc_mask)\n            guided_anchors_list.append(multi_level_guided_anchors)\n            loc_mask_list.append(multi_level_loc_mask)\n        return squares_list, guided_anchors_list, loc_mask_list\n\n    def _get_guided_anchors_single(\n            self,\n            squares: Tensor,\n            shape_pred: Tensor,\n            loc_pred: Tensor,\n            use_loc_filter: bool = False) -> Tuple[Tensor]:\n        \"\"\"Get guided anchors and loc masks for a single level.\n\n        Args:\n            squares (tensor): Squares of a single level.\n            shape_pred (tensor): Shape predictions of a single level.\n            loc_pred (tensor): Loc predictions of a single level.\n            use_loc_filter (list[tensor]): Use loc filter or not.\n                Defaults to False.\n\n        Returns:\n            tuple: guided anchors, location masks\n        \"\"\"\n        # calculate location filtering mask\n        loc_pred = loc_pred.sigmoid().detach()\n        if use_loc_filter:\n            loc_mask = loc_pred >= self.loc_filter_thr\n        else:\n            loc_mask = loc_pred >= 0.0\n        mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors)\n        mask = mask.contiguous().view(-1)\n        # calculate guided anchors\n        squares = squares[mask]\n        anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(\n            -1, 2).detach()[mask]\n        bbox_deltas = anchor_deltas.new_full(squares.size(), 0)\n        bbox_deltas[:, 2:] = anchor_deltas\n        guided_anchors = self.anchor_coder.decode(\n            squares, bbox_deltas, wh_ratio_clip=1e-6)\n        return guided_anchors, mask\n\n    def ga_loc_targets(self, batch_gt_instances: InstanceList,\n                       featmap_sizes: List[Tuple[int, int]]) -> tuple:\n        \"\"\"Compute location targets for guided anchoring.\n\n        Each feature map is divided into positive, negative and ignore regions.\n        - positive regions: target 1, weight 1\n        - ignore regions: target 0, weight 0\n        - 
negative regions: target 0, weight 0.1\n\n        Args:\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            featmap_sizes (list[tuple]): Multi level sizes of each feature\n                maps.\n\n        Returns:\n            tuple: Returns a tuple containing location targets.\n        \"\"\"\n        anchor_scale = self.approx_anchor_generator.octave_base_scale\n        anchor_strides = self.approx_anchor_generator.strides\n        # Currently only supports same stride in x and y direction.\n        for stride in anchor_strides:\n            assert (stride[0] == stride[1])\n        anchor_strides = [stride[0] for stride in anchor_strides]\n\n        center_ratio = self.train_cfg['center_ratio']\n        ignore_ratio = self.train_cfg['ignore_ratio']\n        img_per_gpu = len(batch_gt_instances)\n        num_lvls = len(featmap_sizes)\n        r1 = (1 - center_ratio) / 2\n        r2 = (1 - ignore_ratio) / 2\n        all_loc_targets = []\n        all_loc_weights = []\n        all_ignore_map = []\n        for lvl_id in range(num_lvls):\n            h, w = featmap_sizes[lvl_id]\n            loc_targets = torch.zeros(\n                img_per_gpu,\n                1,\n                h,\n                w,\n                device=batch_gt_instances[0].bboxes.device,\n                dtype=torch.float32)\n            loc_weights = torch.full_like(loc_targets, -1)\n            ignore_map = torch.zeros_like(loc_targets)\n            all_loc_targets.append(loc_targets)\n            all_loc_weights.append(loc_weights)\n            all_ignore_map.append(ignore_map)\n        for img_id in range(img_per_gpu):\n            gt_bboxes = batch_gt_instances[img_id].bboxes\n            scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                               (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n            min_anchor_size = scale.new_full(\n                (1, ), float(anchor_scale * anchor_strides[0]))\n            # assign gt bboxes to different feature levels w.r.t. 
their scales\n            target_lvls = torch.floor(\n                torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n            target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n            for gt_id in range(gt_bboxes.size(0)):\n                lvl = target_lvls[gt_id].item()\n                # rescaled to corresponding feature map\n                gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]\n                # calculate ignore regions\n                ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                    gt_, r2, featmap_sizes[lvl])\n                # calculate positive (center) regions\n                ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(\n                    gt_, r1, featmap_sizes[lvl])\n                all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,\n                                     ctr_x1:ctr_x2 + 1] = 1\n                all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                     ignore_x1:ignore_x2 + 1] = 0\n                all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,\n                                     ctr_x1:ctr_x2 + 1] = 1\n                # calculate ignore map on nearby low level feature\n                if lvl > 0:\n                    d_lvl = lvl - 1\n                    # rescaled to corresponding feature map\n                    gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]\n                    ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                        gt_, r2, featmap_sizes[d_lvl])\n                    all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                          ignore_x1:ignore_x2 + 1] = 1\n                # calculate ignore map on nearby high level feature\n                if lvl < num_lvls - 1:\n                    u_lvl = lvl + 1\n                    # rescaled to corresponding feature map\n                    gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]\n                    ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                        gt_, r2, featmap_sizes[u_lvl])\n                    all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                          ignore_x1:ignore_x2 + 1] = 1\n        for lvl_id in range(num_lvls):\n            # ignore negative regions w.r.t. 
ignore map\n            all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)\n                                    & (all_ignore_map[lvl_id] > 0)] = 0\n            # set negative regions with weight 0.1\n            all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1\n        # loc average factor to balance loss\n        loc_avg_factor = sum(\n            [t.size(0) * t.size(-1) * t.size(-2)\n             for t in all_loc_targets]) / 200\n        return all_loc_targets, all_loc_weights, loc_avg_factor\n\n    def _ga_shape_target_single(self,\n                                flat_approxs: Tensor,\n                                inside_flags: Tensor,\n                                flat_squares: Tensor,\n                                gt_instances: InstanceData,\n                                gt_instances_ignore: Optional[InstanceData],\n                                img_meta: dict,\n                                unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute guided anchoring targets.\n\n        This function returns sampled anchors and gt bboxes directly\n        rather than calculates regression targets.\n\n        Args:\n            flat_approxs (Tensor): flat approxs of a single image,\n                shape (n, 4)\n            inside_flags (Tensor): inside flags of a single image,\n                shape (n, ).\n            flat_squares (Tensor): flat squares of a single image,\n                shape (approxs_per_octave * n, 4)\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n            img_meta (dict): Meta info of a single image.\n            unmap_outputs (bool): unmap outputs or not.\n\n        Returns:\n            tuple: Returns a tuple containing shape targets of each image.\n        \"\"\"\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. 
Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        num_square = flat_squares.size(0)\n        approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4)\n        approxs = approxs[inside_flags, ...]\n        squares = flat_squares[inside_flags, :]\n\n        pred_instances = InstanceData()\n        pred_instances.priors = squares\n        pred_instances.approxs = approxs\n\n        assign_result = self.ga_assigner.assign(\n            pred_instances=pred_instances,\n            gt_instances=gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n        sampling_result = self.ga_sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n\n        bbox_anchors = torch.zeros_like(squares)\n        bbox_gts = torch.zeros_like(squares)\n        bbox_weights = torch.zeros_like(squares)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes\n            bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes\n            bbox_weights[pos_inds, :] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_squares.size(0)\n            bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)\n            bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds,\n                sampling_result)\n\n    def ga_shape_targets(self,\n                         approx_list: List[List[Tensor]],\n                         inside_flag_list: List[List[Tensor]],\n                         square_list: List[List[Tensor]],\n                         batch_gt_instances: InstanceList,\n                         batch_img_metas: List[dict],\n                         batch_gt_instances_ignore: OptInstanceList = None,\n                         unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute guided anchoring targets.\n\n        Args:\n            approx_list (list[list[Tensor]]): Multi level approxs of each\n                image.\n            inside_flag_list (list[list[Tensor]]): Multi level inside flags\n                of each image.\n            square_list (list[list[Tensor]]): Multi level squares of each\n                image.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): unmap outputs or not. 
Defaults to None.\n\n        Returns:\n            tuple:  Returns a tuple containing shape targets.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(approx_list) == len(inside_flag_list) == len(\n            square_list) == num_imgs\n        # anchor number of multi levels\n        num_level_squares = [squares.size(0) for squares in square_list[0]]\n        # concat all level anchors and flags to a single tensor\n        inside_flag_flat_list = []\n        approx_flat_list = []\n        square_flat_list = []\n        for i in range(num_imgs):\n            assert len(square_list[i]) == len(inside_flag_list[i])\n            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))\n            approx_flat_list.append(torch.cat(approx_list[i]))\n            square_flat_list.append(torch.cat(square_list[i]))\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None for _ in range(num_imgs)]\n        (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,\n         neg_inds_list, sampling_results_list) = multi_apply(\n             self._ga_shape_target_single,\n             approx_flat_list,\n             inside_flag_flat_list,\n             square_flat_list,\n             batch_gt_instances,\n             batch_gt_instances_ignore,\n             batch_img_metas,\n             unmap_outputs=unmap_outputs)\n        # sampled anchors of all images\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. multiple levels\n        bbox_anchors_list = images_to_levels(all_bbox_anchors,\n                                             num_level_squares)\n        bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_squares)\n        return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,\n                avg_factor)\n\n    def loss_shape_single(self, shape_pred: Tensor, bbox_anchors: Tensor,\n                          bbox_gts: Tensor, anchor_weights: Tensor,\n                          avg_factor: int) -> Tensor:\n        \"\"\"Compute shape loss in single level.\"\"\"\n        shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)\n        bbox_anchors = bbox_anchors.contiguous().view(-1, 4)\n        bbox_gts = bbox_gts.contiguous().view(-1, 4)\n        anchor_weights = anchor_weights.contiguous().view(-1, 4)\n        bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)\n        bbox_deltas[:, 2:] += shape_pred\n        # filter out negative samples to speed-up weighted_bounded_iou_loss\n        inds = torch.nonzero(\n            anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)\n        bbox_deltas_ = bbox_deltas[inds]\n        bbox_anchors_ = bbox_anchors[inds]\n        bbox_gts_ = bbox_gts[inds]\n        anchor_weights_ = anchor_weights[inds]\n        pred_anchors_ = self.anchor_coder.decode(\n            bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)\n        loss_shape = self.loss_shape(\n            pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=avg_factor)\n        return loss_shape\n\n    def loss_loc_single(self, loc_pred: Tensor, loc_target: Tensor,\n                        loc_weight: Tensor, avg_factor: float) -> Tensor:\n        \"\"\"Compute location loss in single level.\"\"\"\n        loss_loc = self.loss_loc(\n        
    loc_pred.reshape(-1, 1),\n            loc_target.reshape(-1).long(),\n            loc_weight.reshape(-1),\n            avg_factor=avg_factor)\n        return loss_loc\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            shape_preds: List[Tensor],\n            loc_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            shape_preds (list[Tensor]): shape predictions for each scale\n                level with shape (N, 1, H, W).\n            loc_preds (list[Tensor]): location predictions for each scale\n                level with shape (N, num_anchors * 2, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.approx_anchor_generator.num_levels\n\n        device = cls_scores[0].device\n\n        # get loc targets\n        loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(\n            batch_gt_instances, featmap_sizes)\n\n        # get sampled approxes\n        approxs_list, inside_flag_list = self.get_sampled_approxs(\n            featmap_sizes, batch_img_metas, device=device)\n        # get squares and guided anchors\n        squares_list, guided_anchors_list, _ = self.get_anchors(\n            featmap_sizes,\n            shape_preds,\n            loc_preds,\n            batch_img_metas,\n            device=device)\n\n        # get shape targets\n        shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,\n                                              squares_list, batch_gt_instances,\n                                              batch_img_metas)\n        (bbox_anchors_list, bbox_gts_list, anchor_weights_list,\n         ga_avg_factor) = shape_targets\n\n        # get anchor targets\n        cls_reg_targets = self.get_targets(\n            guided_anchors_list,\n            inside_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor) = cls_reg_targets\n\n        # anchor number of multi levels\n        num_level_anchors = [\n            anchors.size(0) for anchors in guided_anchors_list[0]\n        ]\n        # concat all level anchors to a single tensor\n       
 concat_anchor_list = []\n        for i in range(len(guided_anchors_list)):\n            concat_anchor_list.append(torch.cat(guided_anchors_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        # get classification and bbox regression losses\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            avg_factor=avg_factor)\n\n        # get anchor location loss\n        losses_loc = []\n        for i in range(len(loc_preds)):\n            loss_loc = self.loss_loc_single(\n                loc_preds[i],\n                loc_targets[i],\n                loc_weights[i],\n                avg_factor=loc_avg_factor)\n            losses_loc.append(loss_loc)\n\n        # get anchor shape loss\n        losses_shape = []\n        for i in range(len(shape_preds)):\n            loss_shape = self.loss_shape_single(\n                shape_preds[i],\n                bbox_anchors_list[i],\n                bbox_gts_list[i],\n                anchor_weights_list[i],\n                avg_factor=ga_avg_factor)\n            losses_shape.append(loss_shape)\n\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_shape=losses_shape,\n            loss_loc=losses_loc)\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        shape_preds: List[Tensor],\n                        loc_preds: List[Tensor],\n                        batch_img_metas: List[dict],\n                        cfg: OptConfigType = None,\n                        rescale: bool = False) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            shape_preds (list[Tensor]): shape predictions for each scale\n                level with shape (N, 1, H, W).\n            loc_preds (list[Tensor]): location predictions for each scale\n                level with shape (N, num_anchors * 2, H, W).\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. 
Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4), the last\n              dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(\n            loc_preds)\n        num_levels = len(cls_scores)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        device = cls_scores[0].device\n        # get guided anchors\n        _, guided_anchors, loc_masks = self.get_anchors(\n            featmap_sizes,\n            shape_preds,\n            loc_preds,\n            batch_img_metas,\n            use_loc_filter=not self.training,\n            device=device)\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score_list = [\n                cls_scores[i][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_pred_list = [\n                bbox_preds[i][img_id].detach() for i in range(num_levels)\n            ]\n            guided_anchor_list = [\n                guided_anchors[img_id][i].detach() for i in range(num_levels)\n            ]\n            loc_mask_list = [\n                loc_masks[img_id][i].detach() for i in range(num_levels)\n            ]\n            proposals = self._predict_by_feat_single(\n                cls_scores=cls_score_list,\n                bbox_preds=bbox_pred_list,\n                mlvl_anchors=guided_anchor_list,\n                mlvl_masks=loc_mask_list,\n                img_meta=batch_img_metas[img_id],\n                cfg=cfg,\n                rescale=rescale)\n            result_list.append(proposals)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_scores: List[Tensor],\n                                bbox_preds: List[Tensor],\n                                mlvl_anchors: List[Tensor],\n                                mlvl_masks: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigType,\n                                rescale: bool = False) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            mlvl_anchors (list[Tensor]): Each element in the list is\n                the anchors of a single level in feature pyramid. 
it has\n                shape (num_priors, 4).\n            mlvl_masks (list[Tensor]): Each element in the list is location\n                masks of a single level.\n            img_meta (dict): Image meta info.\n            cfg (:obj:`ConfigDict` or dict): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4), the last\n              dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)\n        mlvl_bbox_preds = []\n        mlvl_valid_anchors = []\n        mlvl_scores = []\n        for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,\n                                                       mlvl_anchors,\n                                                       mlvl_masks):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            # if no location is kept, end.\n            if mask.sum() == 0:\n                continue\n            # reshape scores and bbox_pred\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            # filter scores, bbox_pred w.r.t. mask.\n            # anchors are filtered in get_anchors() beforehand.\n            scores = scores[mask, :]\n            bbox_pred = bbox_pred[mask, :]\n            if scores.dim() == 0:\n                anchors = anchors.unsqueeze(0)\n                scores = scores.unsqueeze(0)\n                bbox_pred = bbox_pred.unsqueeze(0)\n            # filter anchors, bbox_pred, scores w.r.t. 
scores\n            nms_pre = cfg.get('nms_pre', -1)\n            if nms_pre > 0 and scores.shape[0] > nms_pre:\n                if self.use_sigmoid_cls:\n                    max_scores, _ = scores.max(dim=1)\n                else:\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    max_scores, _ = scores[:, :-1].max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                anchors = anchors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_valid_anchors.append(anchors)\n            mlvl_scores.append(scores)\n\n        mlvl_bbox_preds = torch.cat(mlvl_bbox_preds)\n        mlvl_anchors = torch.cat(mlvl_valid_anchors)\n        mlvl_scores = torch.cat(mlvl_scores)\n        mlvl_bboxes = self.bbox_coder.decode(\n            mlvl_anchors, mlvl_bbox_preds, max_shape=img_meta['img_shape'])\n\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            mlvl_bboxes /= mlvl_bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n\n        if self.use_sigmoid_cls:\n            # Add a dummy background class to the backend when using sigmoid\n            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n            # BG cat_id: num_class\n            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n        # multi class NMS\n        det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,\n                                                cfg.score_thr, cfg.nms,\n                                                cfg.max_per_img)\n\n        results = InstanceData()\n        results.bboxes = det_bboxes[:, :-1]\n        results.scores = det_bboxes[:, -1]\n        results.labels = det_labels\n        return results\n"
  },
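# --- Illustrative sketch (editor's addition, not part of the repository) ----
# GuidedAnchorHead._get_guided_anchors_single keeps only the locations whose
# predicted "loc" score passes ``loc_filter_thr`` (the use_loc_filter=True
# inference path) and then grows each kept square anchor by the predicted
# (dw, dh) deltas. The plain-PyTorch sketch below assumes num_base_priors == 1
# and approximates what ``DeltaXYWHBBoxCoder`` does for the last two deltas
# (centre kept, width/height rescaled by exp(dw), exp(dh)); it is not the
# exact mmdet implementation.
import torch

def guided_anchors_sketch(squares, shape_pred, loc_pred, loc_filter_thr=0.01):
    """squares: (N, 4) x1y1x2y2; shape_pred: (2, H, W); loc_pred: (1, H, W)."""
    loc_score = loc_pred.sigmoid()
    mask = (loc_score >= loc_filter_thr).permute(1, 2, 0).reshape(-1)
    squares = squares[mask]
    deltas = shape_pred.permute(1, 2, 0).reshape(-1, 2)[mask]
    # decode: keep the square centres, rescale width/height by exp(dw), exp(dh)
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    w = (squares[:, 2] - squares[:, 0]) * deltas[:, 0].exp()
    h = (squares[:, 3] - squares[:, 1]) * deltas[:, 1].exp()
    anchors = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], -1)
    return anchors, mask

if __name__ == '__main__':
    # four stride-8 locations with 64x64 square anchors centred on them
    centers = torch.tensor([[0., 0.], [8., 0.], [0., 8.], [8., 8.]])
    squares = torch.cat([centers - 32, centers + 32], dim=-1)
    anchors, mask = guided_anchors_sketch(
        squares, shape_pred=torch.zeros(2, 2, 2), loc_pred=torch.zeros(1, 2, 2))
    print(anchors.shape, int(mask.sum()))  # torch.Size([4, 4]) 4
# ----------------------------------------------------------------------------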
  {
    "path": "mmdet/models/dense_heads/lad_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import InstanceList, OptInstanceList\nfrom ..utils import levels_to_images, multi_apply, unpack_gt_instances\nfrom .paa_head import PAAHead\n\n\n@MODELS.register_module()\nclass LADHead(PAAHead):\n    \"\"\"Label Assignment Head from the paper: `Improving Object Detection by\n    Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_\"\"\"\n\n    def get_label_assignment(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            iou_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:\n        \"\"\"Get label assignment (from teacher).\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            tuple: Returns a tuple containing label assignment variables.\n\n            - labels (Tensor): Labels of all anchors, each with\n              shape (num_anchors,).\n            - labels_weight (Tensor): Label weights of all anchor.\n              each with shape (num_anchors,).\n            - bboxes_target (Tensor): BBox targets of all anchors.\n              each with shape (num_anchors, 4).\n            - bboxes_weight (Tensor): BBox weights of all anchors.\n              each with shape (num_anchors, 4).\n            - pos_inds_flatten (Tensor): Contains all index of positive\n              sample in all anchor.\n            - pos_anchors (Tensor): Positive anchors.\n            - num_pos (int): Number of positive anchors.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n        )\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,\n         pos_gt_index) = cls_reg_targets\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n            item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,\n                                       cls_scores, bbox_preds, labels,\n                                       labels_weight, bboxes_target,\n                                       bboxes_weight, pos_inds)\n\n        with torch.no_grad():\n            reassign_labels, reassign_label_weight, \\\n                reassign_bbox_weights, num_pos = multi_apply(\n                    self.paa_reassign,\n                    pos_losses_list,\n                    labels,\n                    labels_weight,\n                    bboxes_weight,\n                    pos_inds,\n                    pos_gt_index,\n                    anchor_list)\n            num_pos = sum(num_pos)\n        # convert all tensor list to a flatten tensor\n        labels = torch.cat(reassign_labels, 0).view(-1)\n        flatten_anchors = torch.cat(\n            [torch.cat(item, 0) for item in anchor_list])\n        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)\n        bboxes_target = torch.cat(bboxes_target,\n                                  0).view(-1, bboxes_target[0].size(-1))\n\n        pos_inds_flatten = ((labels >= 0)\n                            &\n                            (labels < self.num_classes)).nonzero().reshape(-1)\n\n        if num_pos:\n            pos_anchors = flatten_anchors[pos_inds_flatten]\n        else:\n            pos_anchors = None\n\n        label_assignment_results = (labels, labels_weight, bboxes_target,\n                                    bboxes_weight, pos_inds_flatten,\n                                    pos_anchors, num_pos)\n        return label_assignment_results\n\n    
def loss(self, x: List[Tensor], label_assignment_results: tuple,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Forward train with the available label assignment (student receives\n        from teacher).\n\n        Args:\n            x (list[Tensor]): Features from FPN.\n            label_assignment_results (tuple): As the outputs defined in the\n                function `self.get_label_assignment`.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            losses: (dict[str, Tensor]): A dictionary of loss components.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        outs = self(x)\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas)\n        losses = self.loss_by_feat(\n            *loss_inputs,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            label_assignment_results=label_assignment_results)\n        return losses\n\n    def loss_by_feat(self,\n                     cls_scores: List[Tensor],\n                     bbox_preds: List[Tensor],\n                     iou_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict],\n                     batch_gt_instances_ignore: OptInstanceList = None,\n                     label_assignment_results: Optional[tuple] = None) -> dict:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            label_assignment_results (tuple, optional): As the outputs defined\n                in the function `self.get_label_assignment`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten,\n         pos_anchors, num_pos) = label_assignment_results\n\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n            item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        iou_preds = levels_to_images(iou_preds)\n        iou_preds = [item.reshape(-1, 1) for item in iou_preds]\n\n        # convert all tensor list to a flatten tensor\n        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))\n        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))\n        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))\n\n        losses_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            labels_weight,\n            avg_factor=max(num_pos, len(batch_img_metas)))  # avoid num_pos=0\n        if num_pos:\n            pos_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, bbox_preds[pos_inds_flatten])\n            pos_bbox_target = bboxes_target[pos_inds_flatten]\n            iou_target = bbox_overlaps(\n                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)\n            losses_iou = self.loss_centerness(\n                iou_preds[pos_inds_flatten],\n                iou_target.unsqueeze(-1),\n                avg_factor=num_pos)\n            losses_bbox = self.loss_bbox(\n                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)\n\n        else:\n            losses_iou = iou_preds.sum() * 0\n            losses_bbox = bbox_preds.sum() * 0\n\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)\n"
  },
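# --- Illustrative sketch (editor's addition, not part of the repository) ----
# In LADHead.loss_by_feat the IoU branch is supervised with
# ``bbox_overlaps(pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)``,
# i.e. the element-wise IoU between each positive prediction and its assigned
# GT box. The helper below is a minimal plain-PyTorch equivalent of that
# aligned IoU (assumption: boxes in x1y1x2y2 format, as elsewhere in this
# codebase); it is only meant to make the supervision target concrete.
import torch

def aligned_iou(boxes1: torch.Tensor, boxes2: torch.Tensor,
                eps: float = 1e-6) -> torch.Tensor:
    """Element-wise IoU of two (N, 4) box tensors in x1y1x2y2 format."""
    lt = torch.max(boxes1[:, :2], boxes2[:, :2])   # intersection top-left
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1 + area2 - inter + eps)

if __name__ == '__main__':
    pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    target = torch.tensor([[0., 0., 10., 10.], [0., 0., 10., 10.]])
    print(aligned_iou(pred, target))  # ~[1.0000, 0.1429]
# ----------------------------------------------------------------------------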
  {
    "path": "mmdet/models/dense_heads/ld_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean\nfrom ..utils import multi_apply, unpack_gt_instances\nfrom .gfl_head import GFLHead\n\n\n@MODELS.register_module()\nclass LDHead(GFLHead):\n    \"\"\"Localization distillation Head. (Short description)\n\n    It utilizes the learned bbox distributions to transfer the localization\n    dark knowledge from teacher to student. Original paper: `Localization\n    Distillation for Object Detection. <https://arxiv.org/abs/2102.12252>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        loss_ld (:obj:`ConfigDict` or dict): Config of Localization\n            Distillation Loss (LD), T is the temperature for distillation.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 loss_ld: ConfigType = dict(\n                     type='LocalizationDistillationLoss',\n                     loss_weight=0.25,\n                     T=10),\n                 **kwargs) -> dict:\n\n        super().__init__(\n            num_classes=num_classes, in_channels=in_channels, **kwargs)\n        self.loss_ld = MODELS.build(loss_ld)\n\n    def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n                            bbox_pred: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            stride: Tuple[int], soft_targets: Tensor,\n                            avg_factor: int):\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Cls and quality joint scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_pred (Tensor): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            stride (tuple): Stride in this scale level.\n            soft_targets (Tensor): Soft BBox regression targets.\n            avg_factor (int): Average factor that is used to average\n                the loss. When using sampling method, avg_factor is usually\n                the sum of positive and negative priors. 
When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            dict[tuple, Tensor]: Loss components and weight targets.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        bbox_pred = bbox_pred.permute(0, 2, 3,\n                                      1).reshape(-1, 4 * (self.reg_max + 1))\n        soft_targets = soft_targets.permute(0, 2, 3,\n                                            1).reshape(-1,\n                                                       4 * (self.reg_max + 1))\n\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n        score = label_weights.new_zeros(labels.shape)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]\n\n            weight_targets = cls_score.detach().sigmoid()\n            weight_targets = weight_targets.max(dim=1)[0][pos_inds]\n            pos_bbox_pred_corners = self.integral(pos_bbox_pred)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchor_centers, pos_bbox_pred_corners)\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n            score[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\n            pos_soft_targets = soft_targets[pos_inds]\n            soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1)\n\n            target_corners = self.bbox_coder.encode(pos_anchor_centers,\n                                                    pos_decode_bbox_targets,\n                                                    self.reg_max).reshape(-1)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=weight_targets,\n                avg_factor=1.0)\n\n            # dfl loss\n            loss_dfl = self.loss_dfl(\n                pred_corners,\n                target_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n\n            # ld loss\n            loss_ld = self.loss_ld(\n                pred_corners,\n                soft_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n\n        else:\n            loss_ld = bbox_pred.sum() * 0\n            loss_bbox = bbox_pred.sum() * 0\n            loss_dfl = bbox_pred.sum() * 0\n            weight_targets = bbox_pred.new_tensor(0)\n\n        # cls (qfl) loss\n        loss_cls = self.loss_cls(\n            cls_score, (labels, score),\n            weight=label_weights,\n            avg_factor=avg_factor)\n\n        return 
loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum()\n\n    def loss(self, x: List[Tensor], out_teacher: Tuple[Tensor],\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"\n        Args:\n            x (list[Tensor]): Features from FPN.\n            out_teacher (tuple[Tensor]): The output of teacher.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            tuple[dict, list]: The loss components and proposals of each image.\n\n            - losses (dict[str, Tensor]): A dictionary of loss components.\n            - proposal_list (list[Tensor]): Proposals of each image.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        outs = self(x)\n        soft_targets = out_teacher[1]\n        loss_inputs = outs + (batch_gt_instances, batch_img_metas,\n                              soft_targets)\n        losses = self.loss_by_feat(\n            *loss_inputs, batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        return losses\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            soft_targets: List[Tensor],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            soft_targets (list[Tensor]): Soft BBox regression targets.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        losses_cls, losses_bbox, losses_dfl, losses_ld, \\\n            avg_factor = multi_apply(\n                self.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                self.prior_generator.strides,\n                soft_targets,\n                avg_factor=avg_factor)\n\n        avg_factor = sum(avg_factor) + 1e-6\n        avg_factor = reduce_mean(avg_factor).item()\n        losses_bbox = [x / avg_factor for x in losses_bbox]\n        losses_dfl = [x / avg_factor for x in losses_dfl]\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_dfl=losses_dfl,\n            loss_ld=losses_ld)\n"
  },
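# --- Illustrative sketch (editor's addition, not part of the repository) ----
# LDHead feeds the student's corner logits (``pred_corners``) and the
# teacher's corner logits (``soft_corners``) to ``loss_ld``, a registered
# ``LocalizationDistillationLoss`` configured with a temperature ``T``.
# The function below is a generic KD-style, temperature-scaled KL divergence
# over the (reg_max + 1) bins of each box side; it only illustrates the idea
# and is an assumption, not the exact mmdet implementation of that loss.
import torch
import torch.nn.functional as F

def ld_kl_sketch(pred_corners: torch.Tensor,
                 soft_corners: torch.Tensor,
                 T: float = 10.0) -> torch.Tensor:
    """pred/soft corners: (num_pos * 4, reg_max + 1) distribution logits."""
    student_log_prob = F.log_softmax(pred_corners / T, dim=-1)
    teacher_prob = F.softmax(soft_corners / T, dim=-1)
    # batchmean KL, rescaled by T^2 as in standard knowledge distillation
    return F.kl_div(student_log_prob, teacher_prob,
                    reduction='batchmean') * T * T

if __name__ == '__main__':
    reg_max = 16
    student = torch.randn(8, reg_max + 1)
    teacher = torch.randn(8, reg_max + 1)
    print(ld_kl_sketch(student, teacher).item())
# ----------------------------------------------------------------------------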
  {
    "path": "mmdet/models/dense_heads/mask2former_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d\nfrom mmcv.ops import point_sample\nfrom mmengine.model import ModuleList, caffe2_xavier_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig, reduce_mean\nfrom ..layers import Mask2FormerTransformerDecoder, SinePositionalEncoding\nfrom ..utils import get_uncertain_point_coords_with_randomness\nfrom .anchor_free_head import AnchorFreeHead\nfrom .maskformer_head import MaskFormerHead\n\n\n@MODELS.register_module()\nclass Mask2FormerHead(MaskFormerHead):\n    \"\"\"Implements the Mask2Former head.\n\n    See `Masked-attention Mask Transformer for Universal Image\n    Segmentation <https://arxiv.org/pdf/2112.01527>`_ for details.\n\n    Args:\n        in_channels (list[int]): Number of channels in the input feature map.\n        feat_channels (int): Number of channels for features.\n        out_channels (int): Number of channels for output.\n        num_things_classes (int): Number of things.\n        num_stuff_classes (int): Number of stuff.\n        num_queries (int): Number of query in Transformer decoder.\n        pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel\n            decoder. Defaults to None.\n        enforce_decoder_input_project (bool, optional): Whether to add\n            a layer to change the embed_dim of tranformer encoder in\n            pixel decoder to the embed_dim of transformer decoder.\n            Defaults to False.\n        transformer_decoder (:obj:`ConfigDict` or dict): Config for\n            transformer decoder. Defaults to None.\n        positional_encoding (:obj:`ConfigDict` or dict): Config for\n            transformer decoder position encoding. Defaults to\n            dict(num_feats=128, normalize=True).\n        loss_cls (:obj:`ConfigDict` or dict): Config of the classification\n            loss. Defaults to None.\n        loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss.\n            Defaults to None.\n        loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss.\n            Defaults to None.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            Mask2Former head.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            Mask2Former head.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: List[int],\n                 feat_channels: int,\n                 out_channels: int,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 num_queries: int = 100,\n                 num_transformer_feat_level: int = 3,\n                 pixel_decoder: ConfigType = ...,\n                 enforce_decoder_input_project: bool = False,\n                 transformer_decoder: ConfigType = ...,\n                 positional_encoding: ConfigType = dict(\n                     num_feats=128, normalize=True),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=2.0,\n                     reduction='mean',\n                     class_weight=[1.0] * 133 + [0.1]),\n                 loss_mask: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     reduction='mean',\n                     loss_weight=5.0),\n                 loss_dice: ConfigType = dict(\n                     type='DiceLoss',\n                     use_sigmoid=True,\n                     activate=True,\n                     reduction='mean',\n                     naive_dice=True,\n                     eps=1.0,\n                     loss_weight=5.0),\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        super(AnchorFreeHead, self).__init__(init_cfg=init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = self.num_things_classes + self.num_stuff_classes\n        self.num_queries = num_queries\n        self.num_transformer_feat_level = num_transformer_feat_level\n        self.num_heads = transformer_decoder.layer_cfg.cross_attn_cfg.num_heads\n        self.num_transformer_decoder_layers = transformer_decoder.num_layers\n        assert pixel_decoder.encoder.layer_cfg. 
\\\n            self_attn_cfg.num_levels == num_transformer_feat_level\n        pixel_decoder_ = copy.deepcopy(pixel_decoder)\n        pixel_decoder_.update(\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            out_channels=out_channels)\n        self.pixel_decoder = MODELS.build(pixel_decoder_)\n        self.transformer_decoder = Mask2FormerTransformerDecoder(\n            **transformer_decoder)\n        self.decoder_embed_dims = self.transformer_decoder.embed_dims\n\n        self.decoder_input_projs = ModuleList()\n        # from low resolution to high resolution\n        for _ in range(num_transformer_feat_level):\n            if (self.decoder_embed_dims != feat_channels\n                    or enforce_decoder_input_project):\n                self.decoder_input_projs.append(\n                    Conv2d(\n                        feat_channels, self.decoder_embed_dims, kernel_size=1))\n            else:\n                self.decoder_input_projs.append(nn.Identity())\n        self.decoder_positional_encoding = SinePositionalEncoding(\n            **positional_encoding)\n        self.query_embed = nn.Embedding(self.num_queries, feat_channels)\n        self.query_feat = nn.Embedding(self.num_queries, feat_channels)\n        # from low resolution to high resolution\n        self.level_embed = nn.Embedding(self.num_transformer_feat_level,\n                                        feat_channels)\n\n        self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)\n        self.mask_embed = nn.Sequential(\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, out_channels))\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n        if train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            self.sampler = TASK_UTILS.build(\n                self.train_cfg['sampler'], default_args=dict(context=self))\n            self.num_points = self.train_cfg.get('num_points', 12544)\n            self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0)\n            self.importance_sample_ratio = self.train_cfg.get(\n                'importance_sample_ratio', 0.75)\n\n        self.class_weight = loss_cls.class_weight\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_mask = MODELS.build(loss_mask)\n        self.loss_dice = MODELS.build(loss_dice)\n\n    def init_weights(self) -> None:\n        for m in self.decoder_input_projs:\n            if isinstance(m, Conv2d):\n                caffe2_xavier_init(m, bias=0)\n\n        self.pixel_decoder.init_weights()\n\n        for p in self.transformer_decoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_normal_(p)\n\n    def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict) -> Tuple[Tensor]:\n        \"\"\"Compute classification and mask targets for one image.\n\n        Args:\n            cls_score (Tensor): Mask score logits from a single decoder layer\n                for one image. Shape (num_queries, cls_out_channels).\n            mask_pred (Tensor): Mask logits for a single decoder layer for one\n                image. 
Shape (num_queries, h, w).\n            gt_instances (:obj:`InstanceData`): It contains ``labels`` and\n                ``masks``.\n            img_meta (dict): Image informtation.\n\n        Returns:\n            tuple[Tensor]: A tuple containing the following for one image.\n\n                - labels (Tensor): Labels of each image. \\\n                    shape (num_queries, ).\n                - label_weights (Tensor): Label weights of each image. \\\n                    shape (num_queries, ).\n                - mask_targets (Tensor): Mask targets of each image. \\\n                    shape (num_queries, h, w).\n                - mask_weights (Tensor): Mask weights of each image. \\\n                    shape (num_queries, ).\n                - pos_inds (Tensor): Sampled positive indices for each \\\n                    image.\n                - neg_inds (Tensor): Sampled negative indices for each \\\n                    image.\n                - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        gt_labels = gt_instances.labels\n        gt_masks = gt_instances.masks\n        # sample points\n        num_queries = cls_score.shape[0]\n        num_gts = gt_labels.shape[0]\n\n        point_coords = torch.rand((1, self.num_points, 2),\n                                  device=cls_score.device)\n        # shape (num_queries, num_points)\n        mask_points_pred = point_sample(\n            mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1,\n                                                        1)).squeeze(1)\n        # shape (num_gts, num_points)\n        gt_points_masks = point_sample(\n            gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1,\n                                                               1)).squeeze(1)\n\n        sampled_gt_instances = InstanceData(\n            labels=gt_labels, masks=gt_points_masks)\n        sampled_pred_instances = InstanceData(\n            scores=cls_score, masks=mask_points_pred)\n        # assign and sample\n        assign_result = self.assigner.assign(\n            pred_instances=sampled_pred_instances,\n            gt_instances=sampled_gt_instances,\n            img_meta=img_meta)\n        pred_instances = InstanceData(scores=cls_score, masks=mask_pred)\n        sampling_result = self.sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        # label target\n        labels = gt_labels.new_full((self.num_queries, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n        label_weights = gt_labels.new_ones((self.num_queries, ))\n\n        # mask target\n        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]\n        mask_weights = mask_pred.new_zeros((self.num_queries, ))\n        mask_weights[pos_inds] = 1.0\n\n        return (labels, label_weights, mask_targets, mask_weights, pos_inds,\n                neg_inds, sampling_result)\n\n    def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor,\n                             batch_gt_instances: List[InstanceData],\n                             batch_img_metas: List[dict]) -> Tuple[Tensor]:\n        \"\"\"Loss function for outputs from a single decoder layer.\n\n        Args:\n            
cls_scores (Tensor): Mask score logits from a single decoder layer\n                for all images. Shape (batch_size, num_queries,\n                cls_out_channels). Note `cls_out_channels` should includes\n                background.\n            mask_preds (Tensor): Mask logits for a pixel decoder for all\n                images. Shape (batch_size, num_queries, h, w).\n            batch_gt_instances (list[obj:`InstanceData`]): each contains\n                ``labels`` and ``masks``.\n            batch_img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple[Tensor]: Loss components for outputs from a single \\\n                decoder layer.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         avg_factor) = self.get_targets(cls_scores_list, mask_preds_list,\n                                        batch_gt_instances, batch_img_metas)\n        # shape (batch_size, num_queries)\n        labels = torch.stack(labels_list, dim=0)\n        # shape (batch_size, num_queries)\n        label_weights = torch.stack(label_weights_list, dim=0)\n        # shape (num_total_gts, h, w)\n        mask_targets = torch.cat(mask_targets_list, dim=0)\n        # shape (batch_size, num_queries)\n        mask_weights = torch.stack(mask_weights_list, dim=0)\n\n        # classfication loss\n        # shape (batch_size * num_queries, )\n        cls_scores = cls_scores.flatten(0, 1)\n        labels = labels.flatten(0, 1)\n        label_weights = label_weights.flatten(0, 1)\n\n        class_weight = cls_scores.new_tensor(self.class_weight)\n        loss_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            label_weights,\n            avg_factor=class_weight[labels].sum())\n\n        num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor]))\n        num_total_masks = max(num_total_masks, 1)\n\n        # extract positive ones\n        # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)\n        mask_preds = mask_preds[mask_weights > 0]\n\n        if mask_targets.shape[0] == 0:\n            # zero match\n            loss_dice = mask_preds.sum()\n            loss_mask = mask_preds.sum()\n            return loss_cls, loss_mask, loss_dice\n\n        with torch.no_grad():\n            points_coords = get_uncertain_point_coords_with_randomness(\n                mask_preds.unsqueeze(1), None, self.num_points,\n                self.oversample_ratio, self.importance_sample_ratio)\n            # shape (num_total_gts, h, w) -> (num_total_gts, num_points)\n            mask_point_targets = point_sample(\n                mask_targets.unsqueeze(1).float(), points_coords).squeeze(1)\n        # shape (num_queries, h, w) -> (num_queries, num_points)\n        mask_point_preds = point_sample(\n            mask_preds.unsqueeze(1), points_coords).squeeze(1)\n\n        # dice loss\n        loss_dice = self.loss_dice(\n            mask_point_preds, mask_point_targets, avg_factor=num_total_masks)\n\n        # mask loss\n        # shape (num_queries, num_points) -> (num_queries * num_points, )\n        mask_point_preds = mask_point_preds.reshape(-1)\n        # shape (num_total_gts, num_points) -> (num_total_gts * num_points, )\n        mask_point_targets = mask_point_targets.reshape(-1)\n        loss_mask = self.loss_mask(\n            
mask_point_preds,\n            mask_point_targets,\n            avg_factor=num_total_masks * self.num_points)\n\n        return loss_cls, loss_mask, loss_dice\n\n    def _forward_head(self, decoder_out: Tensor, mask_feature: Tensor,\n                      attn_mask_target_size: Tuple[int, int]) -> Tuple[Tensor]:\n        \"\"\"Forward for head part which is called after every decoder layer.\n\n        Args:\n            decoder_out (Tensor): in shape (batch_size, num_queries, c).\n            mask_feature (Tensor): in shape (batch_size, c, h, w).\n            attn_mask_target_size (tuple[int, int]): target attention\n                mask size.\n\n        Returns:\n            tuple: A tuple contain three elements.\n\n                - cls_pred (Tensor): Classification scores in shape \\\n                    (batch_size, num_queries, cls_out_channels). \\\n                    Note `cls_out_channels` should includes background.\n                - mask_pred (Tensor): Mask scores in shape \\\n                    (batch_size, num_queries,h, w).\n                - attn_mask (Tensor): Attention mask in shape \\\n                    (batch_size * num_heads, num_queries, h, w).\n        \"\"\"\n        decoder_out = self.transformer_decoder.post_norm(decoder_out)\n        # shape (num_queries, batch_size, c)\n        cls_pred = self.cls_embed(decoder_out)\n        # shape (num_queries, batch_size, c)\n        mask_embed = self.mask_embed(decoder_out)\n        # shape (num_queries, batch_size, h, w)\n        mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)\n        attn_mask = F.interpolate(\n            mask_pred,\n            attn_mask_target_size,\n            mode='bilinear',\n            align_corners=False)\n        # shape (num_queries, batch_size, h, w) ->\n        #   (batch_size * num_head, num_queries, h, w)\n        attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(\n            (1, self.num_heads, 1, 1)).flatten(0, 1)\n        attn_mask = attn_mask.sigmoid() < 0.5\n        attn_mask = attn_mask.detach()\n\n        return cls_pred, mask_pred, attn_mask\n\n    def forward(self, x: List[Tensor],\n                batch_data_samples: SampleList) -> Tuple[List[Tensor]]:\n        \"\"\"Forward function.\n\n        Args:\n            x (list[Tensor]): Multi scale Features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            tuple[list[Tensor]]: A tuple contains two elements.\n\n                - cls_pred_list (list[Tensor)]: Classification logits \\\n                    for each decoder layer. Each is a 3D-tensor with shape \\\n                    (batch_size, num_queries, cls_out_channels). \\\n                    Note `cls_out_channels` should includes background.\n                - mask_pred_list (list[Tensor]): Mask logits for each \\\n                    decoder layer. 
Each with shape (batch_size, num_queries, \\\n                    h, w).\n        \"\"\"\n        batch_img_metas = [\n            data_sample.metainfo for data_sample in batch_data_samples\n        ]\n        batch_size = len(batch_img_metas)\n        mask_features, multi_scale_memorys = self.pixel_decoder(x)\n        # multi_scale_memorys (from low resolution to high resolution)\n        decoder_inputs = []\n        decoder_positional_encodings = []\n        for i in range(self.num_transformer_feat_level):\n            decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])\n            # shape (batch_size, c, h, w) -> (batch_size, h*w, c)\n            decoder_input = decoder_input.flatten(2).permute(0, 2, 1)\n            level_embed = self.level_embed.weight[i].view(1, 1, -1)\n            decoder_input = decoder_input + level_embed\n            # shape (batch_size, c, h, w) -> (batch_size, h*w, c)\n            mask = decoder_input.new_zeros(\n                (batch_size, ) + multi_scale_memorys[i].shape[-2:],\n                dtype=torch.bool)\n            decoder_positional_encoding = self.decoder_positional_encoding(\n                mask)\n            decoder_positional_encoding = decoder_positional_encoding.flatten(\n                2).permute(0, 2, 1)\n            decoder_inputs.append(decoder_input)\n            decoder_positional_encodings.append(decoder_positional_encoding)\n        # shape (num_queries, c) -> (batch_size, num_queries, c)\n        query_feat = self.query_feat.weight.unsqueeze(0).repeat(\n            (batch_size, 1, 1))\n        query_embed = self.query_embed.weight.unsqueeze(0).repeat(\n            (batch_size, 1, 1))\n\n        cls_pred_list = []\n        mask_pred_list = []\n        cls_pred, mask_pred, attn_mask = self._forward_head(\n            query_feat, mask_features, multi_scale_memorys[0].shape[-2:])\n        cls_pred_list.append(cls_pred)\n        mask_pred_list.append(mask_pred)\n\n        for i in range(self.num_transformer_decoder_layers):\n            level_idx = i % self.num_transformer_feat_level\n            # if a mask is all True(all background), then set it all False.\n            attn_mask[torch.where(\n                attn_mask.sum(-1) == attn_mask.shape[-1])] = False\n\n            # cross_attn + self_attn\n            layer = self.transformer_decoder.layers[i]\n            query_feat = layer(\n                query=query_feat,\n                key=decoder_inputs[level_idx],\n                value=decoder_inputs[level_idx],\n                query_pos=query_embed,\n                key_pos=decoder_positional_encodings[level_idx],\n                cross_attn_mask=attn_mask,\n                query_key_padding_mask=None,\n                # here we do not apply masking on padded region\n                key_padding_mask=None)\n            cls_pred, mask_pred, attn_mask = self._forward_head(\n                query_feat, mask_features, multi_scale_memorys[\n                    (i + 1) % self.num_transformer_feat_level].shape[-2:])\n\n            cls_pred_list.append(cls_pred)\n            mask_pred_list.append(mask_pred)\n\n        return cls_pred_list, mask_pred_list\n"
  },
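  {
    "path": "examples/mask2former_attn_mask_sketch.py",
    "content": "# Hypothetical illustration only: this file is not part of the upstream repo.\n# It reproduces, with plain torch, the masked-attention mask built in\n# Mask2FormerHead._forward_head and forward above: a query may only attend\n# where its predicted mask is confident (sigmoid >= 0.5), and a query whose\n# mask would block every location is reset to attend everywhere.\nimport torch\nimport torch.nn.functional as F\n\n\ndef build_attn_mask(mask_pred, target_hw, num_heads):\n    # mask_pred: (batch_size, num_queries, h, w) mask logits\n    attn_mask = F.interpolate(\n        mask_pred, target_hw, mode='bilinear', align_corners=False)\n    # (batch_size, num_queries, h*w) -> (batch_size * num_heads, num_queries, h*w)\n    attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(\n        (1, num_heads, 1, 1)).flatten(0, 1)\n    attn_mask = (attn_mask.sigmoid() < 0.5).detach()\n    # if a query masks out every location, let it attend everywhere instead\n    attn_mask[attn_mask.sum(-1) == attn_mask.shape[-1]] = False\n    return attn_mask\n\n\nif __name__ == '__main__':\n    logits = torch.randn(2, 100, 32, 32)\n    print(build_attn_mask(logits, (16, 16), num_heads=8).shape)\n    # expected: torch.Size([16, 100, 256])\n"
  },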
  {
    "path": "mmdet/models/dense_heads/maskformer_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d\nfrom mmengine.model import caffe2_xavier_init\nfrom mmengine.structures import InstanceData, PixelData\nfrom torch import Tensor\n\nfrom mmdet.models.layers.pixel_decoder import PixelDecoder\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptMultiConfig, reduce_mean)\nfrom ..layers import DetrTransformerDecoder, SinePositionalEncoding\nfrom ..utils import multi_apply, preprocess_panoptic_gt\nfrom .anchor_free_head import AnchorFreeHead\n\n\n@MODELS.register_module()\nclass MaskFormerHead(AnchorFreeHead):\n    \"\"\"Implements the MaskFormer head.\n\n    See `Per-Pixel Classification is Not All You Need for Semantic\n    Segmentation <https://arxiv.org/pdf/2107.06278>`_ for details.\n\n    Args:\n        in_channels (list[int]): Number of channels in the input feature map.\n        feat_channels (int): Number of channels for feature.\n        out_channels (int): Number of channels for output.\n        num_things_classes (int): Number of things.\n        num_stuff_classes (int): Number of stuff.\n        num_queries (int): Number of query in Transformer.\n        pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel\n            decoder.\n        enforce_decoder_input_project (bool): Whether to add a layer\n            to change the embed_dim of transformer encoder in pixel decoder to\n            the embed_dim of transformer decoder. Defaults to False.\n        transformer_decoder (:obj:`ConfigDict` or dict): Config for\n            transformer decoder.\n        positional_encoding (:obj:`ConfigDict` or dict): Config for\n            transformer decoder position encoding.\n        loss_cls (:obj:`ConfigDict` or dict): Config of the classification\n            loss. Defaults to `CrossEntropyLoss`.\n        loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss.\n            Defaults to `FocalLoss`.\n        loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss.\n            Defaults to `DiceLoss`.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            MaskFormer head.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            MaskFormer head.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: List[int],\n                 feat_channels: int,\n                 out_channels: int,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 num_queries: int = 100,\n                 pixel_decoder: ConfigType = ...,\n                 enforce_decoder_input_project: bool = False,\n                 transformer_decoder: ConfigType = ...,\n                 positional_encoding: ConfigType = dict(\n                     num_feats=128, normalize=True),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0,\n                     class_weight=[1.0] * 133 + [0.1]),\n                 loss_mask: ConfigType = dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=20.0),\n                 loss_dice: ConfigType = dict(\n                     type='DiceLoss',\n                     use_sigmoid=True,\n                     activate=True,\n                     naive_dice=True,\n                     loss_weight=1.0),\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        super(AnchorFreeHead, self).__init__(init_cfg=init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = self.num_things_classes + self.num_stuff_classes\n        self.num_queries = num_queries\n\n        pixel_decoder.update(\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            out_channels=out_channels)\n        self.pixel_decoder = MODELS.build(pixel_decoder)\n        self.transformer_decoder = DetrTransformerDecoder(\n            **transformer_decoder)\n        self.decoder_embed_dims = self.transformer_decoder.embed_dims\n        if type(self.pixel_decoder) == PixelDecoder and (\n                self.decoder_embed_dims != in_channels[-1]\n                or enforce_decoder_input_project):\n            self.decoder_input_proj = Conv2d(\n                in_channels[-1], self.decoder_embed_dims, kernel_size=1)\n        else:\n            self.decoder_input_proj = nn.Identity()\n        self.decoder_pe = SinePositionalEncoding(**positional_encoding)\n        self.query_embed = nn.Embedding(self.num_queries, out_channels)\n\n        self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)\n        self.mask_embed = nn.Sequential(\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, out_channels))\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n        if train_cfg:\n            self.assigner = TASK_UTILS.build(train_cfg['assigner'])\n            self.sampler = TASK_UTILS.build(\n                train_cfg['sampler'], default_args=dict(context=self))\n\n        self.class_weight = loss_cls.class_weight\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_mask = MODELS.build(loss_mask)\n        self.loss_dice = MODELS.build(loss_dice)\n\n    def init_weights(self) -> None:\n        if 
isinstance(self.decoder_input_proj, Conv2d):\n            caffe2_xavier_init(self.decoder_input_proj, bias=0)\n\n        self.pixel_decoder.init_weights()\n\n        for p in self.transformer_decoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def preprocess_gt(\n            self, batch_gt_instances: InstanceList,\n            batch_gt_semantic_segs: List[Optional[PixelData]]) -> InstanceList:\n        \"\"\"Preprocess the ground truth for all images.\n\n        Args:\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``labels``, each is\n                ground truth labels of each bbox, with shape (num_gts, )\n                and ``masks``, each is ground truth masks of each instances\n                of a image, shape (num_gts, h, w).\n            gt_semantic_seg (list[Optional[PixelData]]): Ground truth of\n                semantic segmentation, each with the shape (1, h, w).\n                [0, num_thing_class - 1] means things,\n                [num_thing_class, num_class-1] means stuff,\n                255 means VOID. It's None when training instance segmentation.\n\n        Returns:\n            list[obj:`InstanceData`]: each contains the following keys\n\n                - labels (Tensor): Ground truth class indices\\\n                    for a image, with shape (n, ), n is the sum of\\\n                    number of stuff type and number of instance in a image.\n                - masks (Tensor): Ground truth mask for a\\\n                    image, with shape (n, h, w).\n        \"\"\"\n        num_things_list = [self.num_things_classes] * len(batch_gt_instances)\n        num_stuff_list = [self.num_stuff_classes] * len(batch_gt_instances)\n        gt_labels_list = [\n            gt_instances['labels'] for gt_instances in batch_gt_instances\n        ]\n        gt_masks_list = [\n            gt_instances['masks'] for gt_instances in batch_gt_instances\n        ]\n        gt_semantic_segs = [\n            None if gt_semantic_seg is None else gt_semantic_seg.sem_seg\n            for gt_semantic_seg in batch_gt_semantic_segs\n        ]\n        targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,\n                              gt_masks_list, gt_semantic_segs, num_things_list,\n                              num_stuff_list)\n        labels, masks = targets\n        batch_gt_instances = [\n            InstanceData(labels=label, masks=mask)\n            for label, mask in zip(labels, masks)\n        ]\n        return batch_gt_instances\n\n    def get_targets(\n        self,\n        cls_scores_list: List[Tensor],\n        mask_preds_list: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        return_sampling_results: bool = False\n    ) -> Tuple[List[Union[Tensor, int]]]:\n        \"\"\"Compute classification and mask targets for all images for a decoder\n        layer.\n\n        Args:\n            cls_scores_list (list[Tensor]): Mask score logits from a single\n                decoder layer for all images. Each with shape (num_queries,\n                cls_out_channels).\n            mask_preds_list (list[Tensor]): Mask logits from a single decoder\n                layer for all images. 
Each with shape (num_queries, h, w).\n            batch_gt_instances (list[obj:`InstanceData`]): each contains\n                ``labels`` and ``masks``.\n            batch_img_metas (list[dict]): List of image meta information.\n            return_sampling_results (bool): Whether to return the sampling\n                results. Defaults to False.\n\n        Returns:\n            tuple: a tuple containing the following targets.\n\n                - labels_list (list[Tensor]): Labels of all images.\\\n                    Each with shape (num_queries, ).\n                - label_weights_list (list[Tensor]): Label weights\\\n                    of all images. Each with shape (num_queries, ).\n                - mask_targets_list (list[Tensor]): Mask targets of\\\n                    all images. Each with shape (num_queries, h, w).\n                - mask_weights_list (list[Tensor]): Mask weights of\\\n                    all images. Each with shape (num_queries, ).\n                - avg_factor (int): Average factor that is used to average\\\n                    the loss. When using sampling method, avg_factor is\n                    usually the sum of positive and negative priors. When\n                    using `MaskPseudoSampler`, `avg_factor` is usually equal\n                    to the number of positive priors.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end.\n        \"\"\"\n        results = multi_apply(self._get_targets_single, cls_scores_list,\n                              mask_preds_list, batch_gt_instances,\n                              batch_img_metas)\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]\n        rest_results = list(results[7:])\n\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n\n        res = (labels_list, label_weights_list, mask_targets_list,\n               mask_weights_list, avg_factor)\n        if return_sampling_results:\n            res = res + (sampling_results_list, )\n\n        return res + tuple(rest_results)\n\n    def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict) -> Tuple[Tensor]:\n        \"\"\"Compute classification and mask targets for one image.\n\n        Args:\n            cls_score (Tensor): Mask score logits from a single decoder layer\n                for one image. Shape (num_queries, cls_out_channels).\n            mask_pred (Tensor): Mask logits for a single decoder layer for one\n                image. 
Shape (num_queries, h, w).\n            gt_instances (:obj:`InstanceData`): It contains ``labels`` and\n                ``masks``.\n            img_meta (dict): Image informtation.\n\n        Returns:\n            tuple: a tuple containing the following for one image.\n\n                - labels (Tensor): Labels of each image.\n                    shape (num_queries, ).\n                - label_weights (Tensor): Label weights of each image.\n                    shape (num_queries, ).\n                - mask_targets (Tensor): Mask targets of each image.\n                    shape (num_queries, h, w).\n                - mask_weights (Tensor): Mask weights of each image.\n                    shape (num_queries, ).\n                - pos_inds (Tensor): Sampled positive indices for each image.\n                - neg_inds (Tensor): Sampled negative indices for each image.\n                - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        gt_masks = gt_instances.masks\n        gt_labels = gt_instances.labels\n\n        target_shape = mask_pred.shape[-2:]\n        if gt_masks.shape[0] > 0:\n            gt_masks_downsampled = F.interpolate(\n                gt_masks.unsqueeze(1).float(), target_shape,\n                mode='nearest').squeeze(1).long()\n        else:\n            gt_masks_downsampled = gt_masks\n\n        pred_instances = InstanceData(scores=cls_score, masks=mask_pred)\n        downsampled_gt_instances = InstanceData(\n            labels=gt_labels, masks=gt_masks_downsampled)\n        # assign and sample\n        assign_result = self.assigner.assign(\n            pred_instances=pred_instances,\n            gt_instances=downsampled_gt_instances,\n            img_meta=img_meta)\n        sampling_result = self.sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        # label target\n        labels = gt_labels.new_full((self.num_queries, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n        label_weights = gt_labels.new_ones(self.num_queries)\n\n        # mask target\n        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]\n        mask_weights = mask_pred.new_zeros((self.num_queries, ))\n        mask_weights[pos_inds] = 1.0\n\n        return (labels, label_weights, mask_targets, mask_weights, pos_inds,\n                neg_inds, sampling_result)\n\n    def loss_by_feat(self, all_cls_scores: Tensor, all_mask_preds: Tensor,\n                     batch_gt_instances: List[InstanceData],\n                     batch_img_metas: List[dict]) -> Dict[str, Tensor]:\n        \"\"\"Loss function.\n\n        Args:\n            all_cls_scores (Tensor): Classification scores for all decoder\n                layers with shape (num_decoder, batch_size, num_queries,\n                cls_out_channels). 
Note `cls_out_channels` should includes\n                background.\n            all_mask_preds (Tensor): Mask scores for all decoder layers with\n                shape (num_decoder, batch_size, num_queries, h, w).\n            batch_gt_instances (list[obj:`InstanceData`]): each contains\n                ``labels`` and ``masks``.\n            batch_img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_dec_layers = len(all_cls_scores)\n        batch_gt_instances_list = [\n            batch_gt_instances for _ in range(num_dec_layers)\n        ]\n        img_metas_list = [batch_img_metas for _ in range(num_dec_layers)]\n        losses_cls, losses_mask, losses_dice = multi_apply(\n            self._loss_by_feat_single, all_cls_scores, all_mask_preds,\n            batch_gt_instances_list, img_metas_list)\n\n        loss_dict = dict()\n        # loss from the last decoder layer\n        loss_dict['loss_cls'] = losses_cls[-1]\n        loss_dict['loss_mask'] = losses_mask[-1]\n        loss_dict['loss_dice'] = losses_dice[-1]\n        # loss from other decoder layers\n        num_dec_layer = 0\n        for loss_cls_i, loss_mask_i, loss_dice_i in zip(\n                losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]):\n            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i\n            loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i\n            loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i\n            num_dec_layer += 1\n        return loss_dict\n\n    def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor,\n                             batch_gt_instances: List[InstanceData],\n                             batch_img_metas: List[dict]) -> Tuple[Tensor]:\n        \"\"\"Loss function for outputs from a single decoder layer.\n\n        Args:\n            cls_scores (Tensor): Mask score logits from a single decoder layer\n                for all images. Shape (batch_size, num_queries,\n                cls_out_channels). Note `cls_out_channels` should includes\n                background.\n            mask_preds (Tensor): Mask logits for a pixel decoder for all\n                images. 
Shape (batch_size, num_queries, h, w).\n            batch_gt_instances (list[obj:`InstanceData`]): each contains\n                ``labels`` and ``masks``.\n            batch_img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple[Tensor]: Loss components for outputs from a single decoder\\\n                layer.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]\n\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         avg_factor) = self.get_targets(cls_scores_list, mask_preds_list,\n                                        batch_gt_instances, batch_img_metas)\n        # shape (batch_size, num_queries)\n        labels = torch.stack(labels_list, dim=0)\n        # shape (batch_size, num_queries)\n        label_weights = torch.stack(label_weights_list, dim=0)\n        # shape (num_total_gts, h, w)\n        mask_targets = torch.cat(mask_targets_list, dim=0)\n        # shape (batch_size, num_queries)\n        mask_weights = torch.stack(mask_weights_list, dim=0)\n\n        # classfication loss\n        # shape (batch_size * num_queries, )\n        cls_scores = cls_scores.flatten(0, 1)\n        labels = labels.flatten(0, 1)\n        label_weights = label_weights.flatten(0, 1)\n\n        class_weight = cls_scores.new_tensor(self.class_weight)\n        loss_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            label_weights,\n            avg_factor=class_weight[labels].sum())\n\n        num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor]))\n        num_total_masks = max(num_total_masks, 1)\n\n        # extract positive ones\n        # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)\n        mask_preds = mask_preds[mask_weights > 0]\n        target_shape = mask_targets.shape[-2:]\n\n        if mask_targets.shape[0] == 0:\n            # zero match\n            loss_dice = mask_preds.sum()\n            loss_mask = mask_preds.sum()\n            return loss_cls, loss_mask, loss_dice\n\n        # upsample to shape of target\n        # shape (num_total_gts, h, w)\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(1),\n            target_shape,\n            mode='bilinear',\n            align_corners=False).squeeze(1)\n\n        # dice loss\n        loss_dice = self.loss_dice(\n            mask_preds, mask_targets, avg_factor=num_total_masks)\n\n        # mask loss\n        # FocalLoss support input of shape (n, num_class)\n        h, w = mask_preds.shape[-2:]\n        # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1)\n        mask_preds = mask_preds.reshape(-1, 1)\n        # shape (num_total_gts, h, w) -> (num_total_gts * h * w)\n        mask_targets = mask_targets.reshape(-1)\n        # target is (1 - mask_targets) !!!\n        loss_mask = self.loss_mask(\n            mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w)\n\n        return loss_cls, loss_mask, loss_dice\n\n    def forward(self, x: Tuple[Tensor],\n                batch_data_samples: SampleList) -> Tuple[Tensor]:\n        \"\"\"Forward function.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each\n                is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            tuple[Tensor]: a tuple contains two elements.\n\n                - all_cls_scores (Tensor): Classification scores for each\\\n                    scale level. Each is a 4D-tensor with shape\\\n                    (num_decoder, batch_size, num_queries, cls_out_channels).\\\n                    Note `cls_out_channels` should includes background.\n                - all_mask_preds (Tensor): Mask scores for each decoder\\\n                    layer. Each with shape (num_decoder, batch_size,\\\n                    num_queries, h, w).\n        \"\"\"\n        batch_img_metas = [\n            data_sample.metainfo for data_sample in batch_data_samples\n        ]\n        batch_size = len(batch_img_metas)\n        input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape']\n        padding_mask = x[-1].new_ones((batch_size, input_img_h, input_img_w),\n                                      dtype=torch.float32)\n        for i in range(batch_size):\n            img_h, img_w = batch_img_metas[i]['img_shape']\n            padding_mask[i, :img_h, :img_w] = 0\n        padding_mask = F.interpolate(\n            padding_mask.unsqueeze(1), size=x[-1].shape[-2:],\n            mode='nearest').to(torch.bool).squeeze(1)\n        # when backbone is swin, memory is output of last stage of swin.\n        # when backbone is r50, memory is output of tranformer encoder.\n        mask_features, memory = self.pixel_decoder(x, batch_img_metas)\n        pos_embed = self.decoder_pe(padding_mask)\n        memory = self.decoder_input_proj(memory)\n        # shape (batch_size, c, h, w) -> (batch_size, h*w, c)\n        memory = memory.flatten(2).permute(0, 2, 1)\n        pos_embed = pos_embed.flatten(2).permute(0, 2, 1)\n        # shape (batch_size, h * w)\n        padding_mask = padding_mask.flatten(1)\n        # shape = (num_queries, embed_dims)\n        query_embed = self.query_embed.weight\n        # shape = (batch_size, num_queries, embed_dims)\n        query_embed = query_embed.unsqueeze(0).repeat(batch_size, 1, 1)\n        target = torch.zeros_like(query_embed)\n        # shape (num_decoder, num_queries, batch_size, embed_dims)\n        out_dec = self.transformer_decoder(\n            query=target,\n            key=memory,\n            value=memory,\n            query_pos=query_embed,\n            key_pos=pos_embed,\n            key_padding_mask=padding_mask)\n\n        # cls_scores\n        all_cls_scores = self.cls_embed(out_dec)\n\n        # mask_preds\n        mask_embed = self.mask_embed(out_dec)\n        all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed,\n                                      mask_features)\n\n        return all_cls_scores, all_mask_preds\n\n    def loss(\n        self,\n        x: Tuple[Tensor],\n        batch_data_samples: SampleList,\n    ) -> Dict[str, Tensor]:\n        \"\"\"Perform forward propagation and loss calculation of the panoptic\n        head on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the upstream\n                network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        batch_img_metas = []\n        batch_gt_instances = []\n        batch_gt_semantic_segs = []\n        for data_sample in batch_data_samples:\n            batch_img_metas.append(data_sample.metainfo)\n            batch_gt_instances.append(data_sample.gt_instances)\n            if 'gt_sem_seg' in data_sample:\n                batch_gt_semantic_segs.append(data_sample.gt_sem_seg)\n            else:\n                batch_gt_semantic_segs.append(None)\n\n        # forward\n        all_cls_scores, all_mask_preds = self(x, batch_data_samples)\n\n        # preprocess ground truth\n        batch_gt_instances = self.preprocess_gt(batch_gt_instances,\n                                                batch_gt_semantic_segs)\n\n        # loss\n        losses = self.loss_by_feat(all_cls_scores, all_mask_preds,\n                                   batch_gt_instances, batch_img_metas)\n\n        return losses\n\n    def predict(self, x: Tuple[Tensor],\n                batch_data_samples: SampleList) -> Tuple[Tensor]:\n        \"\"\"Test without augmentaton.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            tuple[Tensor]: A tuple contains two tensors.\n\n                - mask_cls_results (Tensor): Mask classification logits,\\\n                    shape (batch_size, num_queries, cls_out_channels).\n                    Note `cls_out_channels` should includes background.\n                - mask_pred_results (Tensor): Mask logits, shape \\\n                    (batch_size, num_queries, h, w).\n        \"\"\"\n        batch_img_metas = [\n            data_sample.metainfo for data_sample in batch_data_samples\n        ]\n        all_cls_scores, all_mask_preds = self(x, batch_data_samples)\n        mask_cls_results = all_cls_scores[-1]\n        mask_pred_results = all_mask_preds[-1]\n\n        # upsample masks\n        img_shape = batch_img_metas[0]['batch_input_shape']\n        mask_pred_results = F.interpolate(\n            mask_pred_results,\n            size=(img_shape[0], img_shape[1]),\n            mode='bilinear',\n            align_corners=False)\n\n        return mask_cls_results, mask_pred_results\n"
  },
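  {
    "path": "examples/maskformer_forward_shapes_sketch.py",
    "content": "# Hypothetical illustration only: this file is not part of the upstream repo.\n# It sketches the shape flow at the end of MaskFormerHead.forward above:\n# per-decoder-layer query embeddings become classification logits through a\n# linear head and mask logits through an einsum with the per-pixel embedding\n# map. The small MLPs below stand in for self.cls_embed and self.mask_embed.\nimport torch\nimport torch.nn as nn\n\nnum_layers, batch, num_queries, feat_c, out_c = 6, 2, 100, 256, 256\nnum_classes, h, w = 133, 50, 75\n\ncls_embed = nn.Linear(feat_c, num_classes + 1)\nmask_embed = nn.Sequential(\n    nn.Linear(feat_c, feat_c), nn.ReLU(inplace=True), nn.Linear(feat_c, out_c))\n\n# decoder outputs for every layer and the pixel decoder's mask feature map\nout_dec = torch.randn(num_layers, batch, num_queries, feat_c)\nmask_features = torch.randn(batch, out_c, h, w)\n\nall_cls_scores = cls_embed(out_dec)  # (l, b, q, num_classes + 1)\nall_mask_preds = torch.einsum(\n    'lbqc,bchw->lbqhw', mask_embed(out_dec), mask_features)\nprint(all_cls_scores.shape, all_mask_preds.shape)\n# expected: torch.Size([6, 2, 100, 134]) torch.Size([6, 2, 100, 50, 75])\n"
  },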
  {
    "path": "mmdet/models/dense_heads/nasfcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\n\nfrom mmdet.models.dense_heads.fcos_head import FCOSHead\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptMultiConfig\n\n\n@MODELS.register_module()\nclass NASFCOSHead(FCOSHead):\n    \"\"\"Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.\n\n    It is quite similar with FCOS head, except for the searched structure of\n    classification branch and bbox regression branch, where a structure of\n    \"dconv3x3, conv3x3, dconv3x3, conv1x1\" is utilized instead.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points\n            in multiple feature levels. Defaults to (4, 8, 16, 32, 64).\n        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling.\n            Defaults to False.\n        center_sample_radius (float): Radius of center sampling.\n            Defaults to 1.5.\n        norm_on_bbox (bool): If true, normalize the regression targets with\n            FPN strides. Defaults to False.\n        centerness_on_reg (bool): If true, position centerness on the\n            regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.\n            Defaults to False.\n        conv_bias (bool or str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Defaults to \"auto\".\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.\n        loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness\n            loss.\n        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and\n            config norm layer.  
Defaults to\n            ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], opitonal): Initialization config dict.\n    \"\"\"  # noqa: E501\n\n    def __init__(self,\n                 *args,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        if init_cfg is None:\n            init_cfg = [\n                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),\n                dict(\n                    type='Normal',\n                    std=0.01,\n                    override=[\n                        dict(name='conv_reg'),\n                        dict(name='conv_centerness'),\n                        dict(\n                            name='conv_cls',\n                            type='Normal',\n                            std=0.01,\n                            bias_prob=0.01)\n                    ]),\n            ]\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        dconv3x3_config = dict(\n            type='DCNv2',\n            kernel_size=3,\n            use_bias=True,\n            deform_groups=2,\n            padding=1)\n        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)\n        conv1x1_config = dict(type='Conv', kernel_size=1)\n\n        self.arch_config = [\n            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config\n        ]\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i, op_ in enumerate(self.arch_config):\n            op = copy.deepcopy(op_)\n            chn = self.in_channels if i == 0 else self.feat_channels\n            assert isinstance(op, dict)\n            use_bias = op.pop('use_bias', False)\n            padding = op.pop('padding', 0)\n            kernel_size = op.pop('kernel_size')\n            module = ConvModule(\n                chn,\n                self.feat_channels,\n                kernel_size,\n                stride=1,\n                padding=padding,\n                norm_cfg=self.norm_cfg,\n                bias=use_bias,\n                conv_cfg=op)\n\n            self.cls_convs.append(copy.deepcopy(module))\n            self.reg_convs.append(copy.deepcopy(module))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n"
  },
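  {
    "path": "examples/nasfcos_branch_sketch.py",
    "content": "# Hypothetical illustration only: this file is not part of the upstream repo.\n# It sketches the searched 'dconv3x3, conv3x3, dconv3x3, conv1x1' branch that\n# NASFCOSHead._init_layers above builds twice (once for the cls branch, once\n# for the reg branch). Plain Conv2d + GroupNorm stand in for ConvModule and\n# the DCNv2 op so the sketch runs without mmcv; only the layer layout is shown.\nimport torch\nimport torch.nn as nn\n\n\ndef build_branch(in_channels=256, feat_channels=256):\n    arch = [\n        dict(kernel_size=3, padding=1),  # dconv3x3 in the real head\n        dict(kernel_size=3, padding=1),  # conv3x3\n        dict(kernel_size=3, padding=1),  # dconv3x3 in the real head\n        dict(kernel_size=1, padding=0),  # conv1x1\n    ]\n    layers = []\n    for i, op in enumerate(arch):\n        chn = in_channels if i == 0 else feat_channels\n        layers += [\n            nn.Conv2d(chn, feat_channels, op['kernel_size'],\n                      padding=op['padding'], bias=False),\n            nn.GroupNorm(32, feat_channels),\n            nn.ReLU(inplace=True),\n        ]\n    return nn.Sequential(*layers)\n\n\nif __name__ == '__main__':\n    branch = build_branch()\n    print(branch(torch.randn(1, 256, 64, 64)).shape)  # (1, 256, 64, 64)\n"
  },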
  {
    "path": "mmdet/models/dense_heads/paa_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList)\nfrom ..layers import multiclass_nms\nfrom ..utils import levels_to_images, multi_apply\nfrom . import ATSSHead\n\nEPS = 1e-12\ntry:\n    import sklearn.mixture as skm\nexcept ImportError:\n    skm = None\n\n\n@MODELS.register_module()\nclass PAAHead(ATSSHead):\n    \"\"\"Head of PAAAssignment: Probabilistic Anchor Assignment with IoU\n    Prediction for Object Detection.\n\n    Code is modified from the `official github repo\n    <https://github.com/kkhoot/PAA/blob/master/paa_core\n    /modeling/rpn/paa/loss.py>`_.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2007.08103>`_ .\n\n    Args:\n        topk (int): Select topk samples with smallest loss in\n            each level.\n        score_voting (bool): Whether to use score voting in post-process.\n        covariance_type : String describing the type of covariance parameters\n            to be used in :class:`sklearn.mixture.GaussianMixture`.\n            It must be one of:\n\n            - 'full': each component has its own general covariance matrix\n            - 'tied': all components share the same general covariance matrix\n            - 'diag': each component has its own diagonal covariance matrix\n            - 'spherical': each component has its own single variance\n            Default: 'diag'. From 'full' to 'spherical', the gmm fitting\n            process is faster yet the performance could be influenced. For most\n            cases, 'diag' should be a good choice.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 topk: int = 9,\n                 score_voting: bool = True,\n                 covariance_type: str = 'diag',\n                 **kwargs):\n        # topk used in paa reassign process\n        self.topk = topk\n        self.with_score_voting = score_voting\n        self.covariance_type = covariance_type\n        super().__init__(*args, **kwargs)\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            iou_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  
It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss gmm_assignment.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n        )\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,\n         pos_gt_index) = cls_reg_targets\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n            item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        iou_preds = levels_to_images(iou_preds)\n        iou_preds = [item.reshape(-1, 1) for item in iou_preds]\n        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,\n                                       cls_scores, bbox_preds, labels,\n                                       labels_weight, bboxes_target,\n                                       bboxes_weight, pos_inds)\n\n        with torch.no_grad():\n            reassign_labels, reassign_label_weight, \\\n                reassign_bbox_weights, num_pos = multi_apply(\n                    self.paa_reassign,\n                    pos_losses_list,\n                    labels,\n                    labels_weight,\n                    bboxes_weight,\n                    pos_inds,\n                    pos_gt_index,\n                    anchor_list)\n            num_pos = sum(num_pos)\n        # convert all tensor list to a flatten tensor\n        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))\n        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))\n        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))\n        labels = torch.cat(reassign_labels, 0).view(-1)\n        flatten_anchors = torch.cat(\n            [torch.cat(item, 0) for item in anchor_list])\n        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)\n        bboxes_target = torch.cat(bboxes_target,\n                                  0).view(-1, bboxes_target[0].size(-1))\n\n        pos_inds_flatten = ((labels >= 0)\n                            &\n                            (labels < self.num_classes)).nonzero().reshape(-1)\n\n        losses_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            labels_weight,\n            avg_factor=max(num_pos, len(batch_img_metas)))  # avoid num_pos=0\n        if num_pos:\n            pos_bbox_pred = self.bbox_coder.decode(\n                flatten_anchors[pos_inds_flatten],\n                bbox_preds[pos_inds_flatten])\n            
pos_bbox_target = bboxes_target[pos_inds_flatten]\n            iou_target = bbox_overlaps(\n                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)\n            losses_iou = self.loss_centerness(\n                iou_preds[pos_inds_flatten],\n                iou_target.unsqueeze(-1),\n                avg_factor=num_pos)\n            losses_bbox = self.loss_bbox(\n                pos_bbox_pred,\n                pos_bbox_target,\n                iou_target.clamp(min=EPS),\n                avg_factor=iou_target.sum())\n        else:\n            losses_iou = iou_preds.sum() * 0\n            losses_bbox = bbox_preds.sum() * 0\n\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)\n\n    def get_pos_loss(self, anchors: List[Tensor], cls_score: Tensor,\n                     bbox_pred: Tensor, label: Tensor, label_weight: Tensor,\n                     bbox_target: dict, bbox_weight: Tensor,\n                     pos_inds: Tensor) -> Tensor:\n        \"\"\"Calculate loss of all potential positive samples obtained from first\n        match process.\n\n        Args:\n            anchors (list[Tensor]): Anchors of each scale.\n            cls_score (Tensor): Box scores of single image with shape\n                (num_anchors, num_classes)\n            bbox_pred (Tensor): Box energies / deltas of single image\n                with shape (num_anchors, 4)\n            label (Tensor): classification target of each anchor with\n                shape (num_anchors,)\n            label_weight (Tensor): Classification loss weight of each\n                anchor with shape (num_anchors).\n            bbox_target (dict): Regression target of each anchor with\n                shape (num_anchors, 4).\n            bbox_weight (Tensor): Bbox weight of each anchor with shape\n                (num_anchors, 4).\n            pos_inds (Tensor): Index of all positive samples got from\n                first assign process.\n\n        Returns:\n            Tensor: Losses of all positive samples in single image.\n        \"\"\"\n        if not len(pos_inds):\n            return cls_score.new([]),\n        anchors_all_level = torch.cat(anchors, 0)\n        pos_scores = cls_score[pos_inds]\n        pos_bbox_pred = bbox_pred[pos_inds]\n        pos_label = label[pos_inds]\n        pos_label_weight = label_weight[pos_inds]\n        pos_bbox_target = bbox_target[pos_inds]\n        pos_bbox_weight = bbox_weight[pos_inds]\n        pos_anchors = anchors_all_level[pos_inds]\n        pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)\n\n        # to keep loss dimension\n        loss_cls = self.loss_cls(\n            pos_scores,\n            pos_label,\n            pos_label_weight,\n            avg_factor=1.0,\n            reduction_override='none')\n\n        loss_bbox = self.loss_bbox(\n            pos_bbox_pred,\n            pos_bbox_target,\n            pos_bbox_weight,\n            avg_factor=1.0,  # keep same loss weight before reassign\n            reduction_override='none')\n\n        loss_cls = loss_cls.sum(-1)\n        pos_loss = loss_bbox + loss_cls\n        return pos_loss,\n\n    def paa_reassign(self, pos_losses: Tensor, label: Tensor,\n                     label_weight: Tensor, bbox_weight: Tensor,\n                     pos_inds: Tensor, pos_gt_inds: Tensor,\n                     anchors: List[Tensor]) -> tuple:\n        \"\"\"Fit loss to GMM distribution and separate positive, ignore, negative\n        samples again with GMM model.\n\n     
   Args:\n            pos_losses (Tensor): Losses of all positive samples in\n                single image.\n            label (Tensor): classification target of each anchor with\n                shape (num_anchors,)\n            label_weight (Tensor): Classification loss weight of each\n                anchor with shape (num_anchors).\n            bbox_weight (Tensor): Bbox weight of each anchor with shape\n                (num_anchors, 4).\n            pos_inds (Tensor): Index of all positive samples got from\n                first assign process.\n            pos_gt_inds (Tensor): Gt_index of all positive samples got\n                from first assign process.\n            anchors (list[Tensor]): Anchors of each scale.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - label (Tensor): classification target of each anchor after\n                  paa assign, with shape (num_anchors,)\n                - label_weight (Tensor): Classification loss weight of each\n                  anchor after paa assign, with shape (num_anchors).\n                - bbox_weight (Tensor): Bbox weight of each anchor with shape\n                  (num_anchors, 4).\n                - num_pos (int): The number of positive samples after paa\n                  assign.\n        \"\"\"\n        if not len(pos_inds):\n            return label, label_weight, bbox_weight, 0\n        label = label.clone()\n        label_weight = label_weight.clone()\n        bbox_weight = bbox_weight.clone()\n        num_gt = pos_gt_inds.max() + 1\n        num_level = len(anchors)\n        num_anchors_each_level = [item.size(0) for item in anchors]\n        num_anchors_each_level.insert(0, 0)\n        inds_level_interval = np.cumsum(num_anchors_each_level)\n        pos_level_mask = []\n        for i in range(num_level):\n            mask = (pos_inds >= inds_level_interval[i]) & (\n                pos_inds < inds_level_interval[i + 1])\n            pos_level_mask.append(mask)\n        pos_inds_after_paa = [label.new_tensor([])]\n        ignore_inds_after_paa = [label.new_tensor([])]\n        for gt_ind in range(num_gt):\n            pos_inds_gmm = []\n            pos_loss_gmm = []\n            gt_mask = pos_gt_inds == gt_ind\n            for level in range(num_level):\n                level_mask = pos_level_mask[level]\n                level_gt_mask = level_mask & gt_mask\n                value, topk_inds = pos_losses[level_gt_mask].topk(\n                    min(level_gt_mask.sum(), self.topk), largest=False)\n                pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])\n                pos_loss_gmm.append(value)\n            pos_inds_gmm = torch.cat(pos_inds_gmm)\n            pos_loss_gmm = torch.cat(pos_loss_gmm)\n            # fix gmm need at least two sample\n            if len(pos_inds_gmm) < 2:\n                continue\n            device = pos_inds_gmm.device\n            pos_loss_gmm, sort_inds = pos_loss_gmm.sort()\n            pos_inds_gmm = pos_inds_gmm[sort_inds]\n            pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()\n            min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()\n            means_init = np.array([min_loss, max_loss]).reshape(2, 1)\n            weights_init = np.array([0.5, 0.5])\n            precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1)  # full\n            if self.covariance_type == 'spherical':\n                precisions_init = precisions_init.reshape(2)\n            elif self.covariance_type == 
'diag':\n                precisions_init = precisions_init.reshape(2, 1)\n            elif self.covariance_type == 'tied':\n                precisions_init = np.array([[1.0]])\n            if skm is None:\n                raise ImportError('Please run \"pip install sklearn\" '\n                                  'to install sklearn first.')\n            gmm = skm.GaussianMixture(\n                2,\n                weights_init=weights_init,\n                means_init=means_init,\n                precisions_init=precisions_init,\n                covariance_type=self.covariance_type)\n            gmm.fit(pos_loss_gmm)\n            gmm_assignment = gmm.predict(pos_loss_gmm)\n            scores = gmm.score_samples(pos_loss_gmm)\n            gmm_assignment = torch.from_numpy(gmm_assignment).to(device)\n            scores = torch.from_numpy(scores).to(device)\n\n            pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(\n                gmm_assignment, scores, pos_inds_gmm)\n            pos_inds_after_paa.append(pos_inds_temp)\n            ignore_inds_after_paa.append(ignore_inds_temp)\n\n        pos_inds_after_paa = torch.cat(pos_inds_after_paa)\n        ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)\n        reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)\n        reassign_ids = pos_inds[reassign_mask]\n        label[reassign_ids] = self.num_classes\n        label_weight[ignore_inds_after_paa] = 0\n        bbox_weight[reassign_ids] = 0\n        num_pos = len(pos_inds_after_paa)\n        return label, label_weight, bbox_weight, num_pos\n\n    def gmm_separation_scheme(self, gmm_assignment: Tensor, scores: Tensor,\n                              pos_inds_gmm: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"A general separation scheme for gmm model.\n\n        It separates a GMM distribution of candidate samples into three\n        parts, 0 1 and uncertain areas, and you can implement other\n        separation schemes by rewriting this function.\n\n        Args:\n            gmm_assignment (Tensor): The prediction of GMM which is of shape\n                (num_samples,). The 0/1 value indicates the distribution\n                that each sample comes from.\n            scores (Tensor): The probability of sample coming from the\n                fit GMM distribution. The tensor is of shape (num_samples,).\n            pos_inds_gmm (Tensor): All the indexes of samples which are used\n                to fit GMM model. 
The tensor is of shape (num_samples,)\n\n        Returns:\n            tuple[Tensor, Tensor]: The indices of positive and ignored samples.\n\n                - pos_inds_temp (Tensor): Indices of positive samples.\n                - ignore_inds_temp (Tensor): Indices of ignore samples.\n        \"\"\"\n        # The implementation is (c) in Fig.3 in origin paper instead of (b).\n        # You can refer to issues such as\n        # https://github.com/kkhoot/PAA/issues/8 and\n        # https://github.com/kkhoot/PAA/issues/9.\n        fgs = gmm_assignment == 0\n        pos_inds_temp = fgs.new_tensor([], dtype=torch.long)\n        ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)\n        if fgs.nonzero().numel():\n            _, pos_thr_ind = scores[fgs].topk(1)\n            pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]\n            ignore_inds_temp = pos_inds_gmm.new_tensor([])\n        return pos_inds_temp, ignore_inds_temp\n\n    def get_targets(self,\n                    anchor_list: List[List[Tensor]],\n                    valid_flag_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs: bool = True) -> tuple:\n        \"\"\"Get targets for PAA head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. We direct\n        return the results from _get_targets_single instead map it to levels\n        by images_to_levels function.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. 
Defaults to True.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels (list[Tensor]): Labels of all anchors, each with\n                    shape (num_anchors,).\n                - label_weights (list[Tensor]): Label weights of all anchor.\n                    each with shape (num_anchors,).\n                - bbox_targets (list[Tensor]): BBox targets of all anchors.\n                    each with shape (num_anchors, 4).\n                - bbox_weights (list[Tensor]): BBox weights of all anchors.\n                    each with shape (num_anchors, 4).\n                - pos_inds (list[Tensor]): Contains all index of positive\n                    sample in all anchor.\n                - gt_inds (list[Tensor]): Contains all gt_index of positive\n                    sample in all anchor.\n        \"\"\"\n\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n        concat_anchor_list = []\n        concat_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        results = multi_apply(\n            self._get_targets_single,\n            concat_anchor_list,\n            concat_valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore,\n            unmap_outputs=unmap_outputs)\n\n        (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,\n         valid_neg_inds, sampling_result) = results\n\n        # Due to valid flag of anchors, we have to calculate the real pos_inds\n        # in origin anchor set.\n        pos_inds = []\n        for i, single_labels in enumerate(labels):\n            pos_mask = (0 <= single_labels) & (\n                single_labels < self.num_classes)\n            pos_inds.append(pos_mask.nonzero().view(-1))\n\n        gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                gt_inds)\n\n    def _get_targets_single(self,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        This method is same as `AnchorHead._get_targets_single()`.\n        \"\"\"\n        assert unmap_outputs, 'We must map outputs back to the original' \\\n                              'set of anchors in PAAhead'\n        return super(ATSSHead, self)._get_targets_single(\n            flat_anchors,\n            valid_flags,\n            gt_instances,\n            img_meta,\n            gt_instances_ignore,\n            unmap_outputs=True)\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        score_factors: Optional[List[Tensor]] = None,\n       
                 batch_img_metas: Optional[List[dict]] = None,\n                        cfg: OptConfigType = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        This method is same as `BaseDenseHead.get_results()`.\n        \"\"\"\n        assert with_nms, 'PAA only supports \"with_nms=True\" now and it ' \\\n                         'means PAAHead does not support ' \\\n                         'test-time augmentation'\n        return super().predict_by_feat(\n            cls_scores=cls_scores,\n            bbox_preds=bbox_preds,\n            score_factors=score_factors,\n            batch_img_metas=batch_img_metas,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms)\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: OptConfigType = None,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factors from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_score_factors = []\n        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \\\n                
enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid()\n\n            if 0 < nms_pre < scores.shape[0]:\n                max_scores, _ = (scores *\n                                 score_factor[:, None]).sqrt().max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                priors = priors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n                score_factor = score_factor[topk_inds]\n\n            bboxes = self.bbox_coder.decode(\n                priors, bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_score_factors.append(score_factor)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.score_factors = torch.cat(mlvl_score_factors)\n\n        return self._bbox_post_process(results, cfg, rescale, with_nms,\n                                       img_meta)\n\n    def _bbox_post_process(self,\n                           results: InstanceData,\n                           cfg: ConfigType,\n                           rescale: bool = False,\n                           with_nms: bool = True,\n                           img_meta: Optional[dict] = None):\n        \"\"\"bbox post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation. Usually with_nms is False is used for aug test.\n\n        Args:\n            results (:obj:`InstaceData`): Detection instance results,\n                each item has shape (num_bboxes, ).\n            cfg (:obj:`ConfigDict` or dict): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n            img_meta (dict, optional): Image meta info. 
Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        if rescale:\n            results.bboxes /= results.bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n        # Add a dummy background class to the backend when using sigmoid\n        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n        # BG cat_id: num_class\n        padding = results.scores.new_zeros(results.scores.shape[0], 1)\n        mlvl_scores = torch.cat([results.scores, padding], dim=1)\n\n        mlvl_nms_scores = (mlvl_scores * results.score_factors[:, None]).sqrt()\n        det_bboxes, det_labels = multiclass_nms(\n            results.bboxes,\n            mlvl_nms_scores,\n            cfg.score_thr,\n            cfg.nms,\n            cfg.max_per_img,\n            score_factors=None)\n        if self.with_score_voting and len(det_bboxes) > 0:\n            det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels,\n                                                       results.bboxes,\n                                                       mlvl_nms_scores,\n                                                       cfg.score_thr)\n        nms_results = InstanceData()\n        nms_results.bboxes = det_bboxes[:, :-1]\n        nms_results.scores = det_bboxes[:, -1]\n        nms_results.labels = det_labels\n        return nms_results\n\n    def score_voting(self, det_bboxes: Tensor, det_labels: Tensor,\n                     mlvl_bboxes: Tensor, mlvl_nms_scores: Tensor,\n                     score_thr: float) -> Tuple[Tensor, Tensor]:\n        \"\"\"Implementation of score voting method works on each remaining boxes\n        after NMS procedure.\n\n        Args:\n            det_bboxes (Tensor): Remaining boxes after NMS procedure,\n                with shape (k, 5), each dimension means\n                (x1, y1, x2, y2, score).\n            det_labels (Tensor): The label of remaining boxes, with shape\n                (k, 1),Labels are 0-based.\n            mlvl_bboxes (Tensor): All boxes before the NMS procedure,\n                with shape (num_anchors,4).\n            mlvl_nms_scores (Tensor): The scores of all boxes which is used\n                in the NMS procedure, with shape (num_anchors, num_class)\n            score_thr (float): The score threshold of bboxes.\n\n        Returns:\n            tuple: Usually returns a tuple containing voting results.\n\n                - det_bboxes_voted (Tensor): Remaining boxes after\n                    score voting procedure, with shape (k, 5), each\n                    dimension means (x1, y1, x2, y2, score).\n                - det_labels_voted (Tensor): Label of remaining bboxes\n                    after voting, with shape (num_anchors,).\n        \"\"\"\n        candidate_mask = mlvl_nms_scores > score_thr\n        candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False)\n        candidate_inds = candidate_mask_nonzeros[:, 0]\n        candidate_labels = candidate_mask_nonzeros[:, 1]\n        candidate_bboxes = 
mlvl_bboxes[candidate_inds]\n        candidate_scores = mlvl_nms_scores[candidate_mask]\n        det_bboxes_voted = []\n        det_labels_voted = []\n        for cls in range(self.cls_out_channels):\n            candidate_cls_mask = candidate_labels == cls\n            if not candidate_cls_mask.any():\n                continue\n            candidate_cls_scores = candidate_scores[candidate_cls_mask]\n            candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]\n            det_cls_mask = det_labels == cls\n            det_cls_bboxes = det_bboxes[det_cls_mask].view(\n                -1, det_bboxes.size(-1))\n            det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],\n                                               candidate_cls_bboxes)\n            for det_ind in range(len(det_cls_bboxes)):\n                single_det_ious = det_candidate_ious[det_ind]\n                pos_ious_mask = single_det_ious > 0.01\n                pos_ious = single_det_ious[pos_ious_mask]\n                pos_bboxes = candidate_cls_bboxes[pos_ious_mask]\n                pos_scores = candidate_cls_scores[pos_ious_mask]\n                pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *\n                       pos_scores)[:, None]\n                voted_box = torch.sum(\n                    pis * pos_bboxes, dim=0) / torch.sum(\n                        pis, dim=0)\n                voted_score = det_cls_bboxes[det_ind][-1:][None, :]\n                det_bboxes_voted.append(\n                    torch.cat((voted_box[None, :], voted_score), dim=1))\n                det_labels_voted.append(cls)\n\n        det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)\n        det_labels_voted = det_labels.new_tensor(det_labels_voted)\n        return det_bboxes_voted, det_labels_voted\n"
  },
  {
    "path": "mmdet/models/dense_heads/pisa_retinanet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList, OptInstanceList\nfrom ..losses import carl_loss, isr_p\nfrom ..utils import images_to_levels\nfrom .retina_head import RetinaHead\n\n\n@MODELS.register_module()\nclass PISARetinaHead(RetinaHead):\n    \"\"\"PISA Retinanet Head.\n\n    The head owns the same structure with Retinanet Head, but differs in two\n        aspects:\n        1. Importance-based Sample Reweighting Positive (ISR-P) is applied to\n            change the positive loss weights.\n        2. Classification-aware regression loss is adopted as a third loss.\n    \"\"\"\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: Loss dict, comprise classification loss, regression loss and\n            carl loss.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            return_sampling_results=True)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor, sampling_results_list) = cls_reg_targets\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        num_imgs = len(batch_img_metas)\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_cls_scores = torch.cat(\n            flatten_cls_scores, dim=1).reshape(-1,\n                                               flatten_cls_scores[0].size(-1))\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_bbox_preds = torch.cat(\n            flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))\n        flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)\n        flatten_label_weights = torch.cat(\n            label_weights_list, dim=1).reshape(-1)\n        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)\n        flatten_bbox_targets = torch.cat(\n            bbox_targets_list, dim=1).reshape(-1, 4)\n        flatten_bbox_weights = torch.cat(\n            bbox_weights_list, dim=1).reshape(-1, 4)\n\n        # Apply ISR-P\n        isr_cfg = self.train_cfg.get('isr', None)\n        if isr_cfg is not None:\n            all_targets = (flatten_labels, flatten_label_weights,\n                           flatten_bbox_targets, flatten_bbox_weights)\n            with torch.no_grad():\n                all_targets = isr_p(\n                    flatten_cls_scores,\n                    flatten_bbox_preds,\n                    all_targets,\n                    flatten_anchors,\n                    sampling_results_list,\n                    bbox_coder=self.bbox_coder,\n                    loss_cls=self.loss_cls,\n                    num_class=self.num_classes,\n                    **self.train_cfg['isr'])\n            (flatten_labels, flatten_label_weights, flatten_bbox_targets,\n             flatten_bbox_weights) = all_targets\n\n        # For convenience we compute 
loss once instead of separating by fpn level,\n        # so that we don't need to separate the weights by level again.\n        # The result should be the same\n        losses_cls = self.loss_cls(\n            flatten_cls_scores,\n            flatten_labels,\n            flatten_label_weights,\n            avg_factor=avg_factor)\n        losses_bbox = self.loss_bbox(\n            flatten_bbox_preds,\n            flatten_bbox_targets,\n            flatten_bbox_weights,\n            avg_factor=avg_factor)\n        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n        # CARL Loss\n        carl_cfg = self.train_cfg.get('carl', None)\n        if carl_cfg is not None:\n            loss_carl = carl_loss(\n                flatten_cls_scores,\n                flatten_labels,\n                flatten_bbox_preds,\n                flatten_bbox_targets,\n                self.loss_bbox,\n                **self.train_cfg['carl'],\n                avg_factor=avg_factor,\n                sigmoid=True,\n                num_class=self.num_classes)\n            loss_dict.update(loss_carl)\n\n        return loss_dict\n"
  },
  {
    "path": "mmdet/models/dense_heads/pisa_ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList, OptInstanceList\nfrom ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p\nfrom ..utils import multi_apply\nfrom .ssd_head import SSDHead\n\n\n# TODO: add loss evaluator for SSD\n@MODELS.register_module()\nclass PISASSDHead(SSDHead):\n    \"\"\"Implementation of `PISA SSD head <https://arxiv.org/abs/1904.04821>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (Sequence[int]): Number of channels in the input feature\n            map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Defaults to 0.\n        feat_channels (int): Number of hidden channels when stacked_convs\n            > 0. Defaults to 256.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Defaults to False.\n        conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config conv layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config norm layer. Defaults to None.\n        act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config activation layer. Defaults to None.\n        anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor\n            generator.\n        bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Defaults to False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of\n            anchor head.\n        test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of\n            anchor head.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], Optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Union[List[Tensor], Tensor]]:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Union[List[Tensor], Tensor]]: A dictionary of loss\n            components. the dict has components below:\n\n            - loss_cls (list[Tensor]): A list containing each feature map \\\n            classification loss.\n            - loss_bbox (list[Tensor]): A list containing each feature map \\\n            regression loss.\n            - loss_carl (Tensor): The loss of CARL.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            unmap_outputs=False,\n            return_sampling_results=True)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor, sampling_results_list) = cls_reg_targets\n\n        num_images = len(batch_img_metas)\n        all_cls_scores = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(\n                num_images, -1, self.cls_out_channels) for s in cls_scores\n        ], 1)\n        all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n        all_label_weights = torch.cat(label_weights_list,\n                                      -1).view(num_images, -1)\n        all_bbox_preds = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n            for b in bbox_preds\n        ], -2)\n        all_bbox_targets = torch.cat(bbox_targets_list,\n                                     -2).view(num_images, -1, 4)\n        all_bbox_weights = torch.cat(bbox_weights_list,\n                                     -2).view(num_images, -1, 4)\n\n        # concat all level anchors to a single tensor\n        all_anchors = []\n        for i in range(num_images):\n            all_anchors.append(torch.cat(anchor_list[i]))\n\n        isr_cfg = self.train_cfg.get('isr', None)\n        all_targets = (all_labels.view(-1), all_label_weights.view(-1),\n                       all_bbox_targets.view(-1,\n                                             4), all_bbox_weights.view(-1, 4))\n        # apply ISR-P\n        if isr_cfg is not None:\n            all_targets = isr_p(\n                all_cls_scores.view(-1, all_cls_scores.size(-1)),\n                all_bbox_preds.view(-1, 4),\n                all_targets,\n                torch.cat(all_anchors),\n                sampling_results_list,\n                loss_cls=CrossEntropyLoss(),\n                bbox_coder=self.bbox_coder,\n                **self.train_cfg['isr'],\n                num_class=self.num_classes)\n            (new_labels, new_label_weights, new_bbox_targets,\n             new_bbox_weights) = all_targets\n            all_labels = new_labels.view(all_labels.shape)\n            all_label_weights = new_label_weights.view(all_label_weights.shape)\n            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)\n            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)\n\n        # add CARL loss\n        carl_loss_cfg = self.train_cfg.get('carl', 
None)\n        if carl_loss_cfg is not None:\n            loss_carl = carl_loss(\n                all_cls_scores.view(-1, all_cls_scores.size(-1)),\n                all_targets[0],\n                all_bbox_preds.view(-1, 4),\n                all_targets[2],\n                SmoothL1Loss(beta=1.),\n                **self.train_cfg['carl'],\n                avg_factor=avg_factor,\n                num_class=self.num_classes)\n\n        # check NaN and Inf\n        assert torch.isfinite(all_cls_scores).all().item(), \\\n            'classification scores become infinite or NaN!'\n        assert torch.isfinite(all_bbox_preds).all().item(), \\\n            'bbox predictions become infinite or NaN!'\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_by_feat_single,\n            all_cls_scores,\n            all_bbox_preds,\n            all_anchors,\n            all_labels,\n            all_label_weights,\n            all_bbox_targets,\n            all_bbox_weights,\n            avg_factor=avg_factor)\n        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n        if carl_loss_cfg is not None:\n            loss_dict.update(loss_carl)\n        return loss_dict\n"
  },
  {
    "path": "mmdet/models/dense_heads/reppoints_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,\n                     unmap)\nfrom .anchor_free_head import AnchorFreeHead\n\n\n@MODELS.register_module()\nclass RepPointsHead(AnchorFreeHead):\n    \"\"\"RepPoint head.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        point_feat_channels (int): Number of channels of points features.\n        num_points (int): Number of points.\n        gradient_mul (float): The multiplier to gradients from\n            points refinement and recognition.\n        point_strides (Sequence[int]): points strides.\n        point_base_scale (int): bbox scale for assigning labels.\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox_init (:obj:`ConfigDict` or dict): Config of initial points\n            loss.\n        loss_bbox_refine (:obj:`ConfigDict` or dict): Config of points loss in\n            refinement.\n        use_grid_points (bool): If we use bounding box representation, the\n        reppoints is represented as grid points on the bounding box.\n        center_init (bool): Whether to use center point assignment.\n        transform_method (str): The methods to transform RepPoints to bbox.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 point_feat_channels: int = 256,\n                 num_points: int = 9,\n                 gradient_mul: float = 0.1,\n                 point_strides: Sequence[int] = [8, 16, 32, 64, 128],\n                 point_base_scale: int = 4,\n                 loss_cls: ConfigType = dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox_init: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),\n                 loss_bbox_refine: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n                 use_grid_points: bool = False,\n                 center_init: bool = True,\n                 transform_method: str = 'moment',\n                 moment_mul: float = 0.01,\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='reppoints_cls_out',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        
self.num_points = num_points\n        self.point_feat_channels = point_feat_channels\n        self.use_grid_points = use_grid_points\n        self.center_init = center_init\n\n        # we use deform conv to extract points features\n        self.dcn_kernel = int(np.sqrt(num_points))\n        self.dcn_pad = int((self.dcn_kernel - 1) / 2)\n        assert self.dcn_kernel * self.dcn_kernel == num_points, \\\n            'The points number should be a square number.'\n        assert self.dcn_kernel % 2 == 1, \\\n            'The points number should be an odd square number.'\n        dcn_base = np.arange(-self.dcn_pad,\n                             self.dcn_pad + 1).astype(np.float64)\n        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)\n        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)\n        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(\n            (-1))\n        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)\n\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            loss_cls=loss_cls,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        self.gradient_mul = gradient_mul\n        self.point_base_scale = point_base_scale\n        self.point_strides = point_strides\n        self.prior_generator = MlvlPointGenerator(\n            self.point_strides, offset=0.)\n\n        if self.train_cfg:\n            self.init_assigner = TASK_UTILS.build(\n                self.train_cfg['init']['assigner'])\n            self.refine_assigner = TASK_UTILS.build(\n                self.train_cfg['refine']['assigner'])\n\n            if self.train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n        self.transform_method = transform_method\n        if self.transform_method == 'moment':\n            self.moment_transfer = nn.Parameter(\n                data=torch.zeros(2), requires_grad=True)\n            self.moment_mul = moment_mul\n\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = self.num_classes\n        else:\n            self.cls_out_channels = self.num_classes + 1\n        self.loss_bbox_init = MODELS.build(loss_bbox_init)\n        self.loss_bbox_refine = MODELS.build(loss_bbox_refine)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        pts_out_dim = 4 if self.use_grid_points else 2 * 
self.num_points\n        self.reppoints_cls_conv = DeformConv2d(self.feat_channels,\n                                               self.point_feat_channels,\n                                               self.dcn_kernel, 1,\n                                               self.dcn_pad)\n        self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,\n                                           self.cls_out_channels, 1, 1, 0)\n        self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,\n                                                 self.point_feat_channels, 3,\n                                                 1, 1)\n        self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,\n                                                pts_out_dim, 1, 1, 0)\n        self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,\n                                                      self.point_feat_channels,\n                                                      self.dcn_kernel, 1,\n                                                      self.dcn_pad)\n        self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,\n                                                  pts_out_dim, 1, 1, 0)\n\n    def points2bbox(self, pts: Tensor, y_first: bool = True) -> Tensor:\n        \"\"\"Converting the points set into bounding box.\n\n        Args:\n            pts (Tensor): the input points sets (fields), each points\n                set (fields) is represented as 2n scalar.\n            y_first (bool): if y_first=True, the point set is\n                represented as [y1, x1, y2, x2 ... yn, xn], otherwise\n                the point set is represented as\n                [x1, y1, x2, y2 ... xn, yn]. Defaults to True.\n\n        Returns:\n            Tensor: each points set is converting to a bbox [x1, y1, x2, y2].\n        \"\"\"\n        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])\n        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,\n                                                                      ...]\n        pts_x = pts_reshape[:, :, 1, ...] 
if y_first else pts_reshape[:, :, 0,\n                                                                      ...]\n        if self.transform_method == 'minmax':\n            bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n            bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n            bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n                             dim=1)\n        elif self.transform_method == 'partial_minmax':\n            pts_y = pts_y[:, :4, ...]\n            pts_x = pts_x[:, :4, ...]\n            bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n            bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n            bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n                             dim=1)\n        elif self.transform_method == 'moment':\n            pts_y_mean = pts_y.mean(dim=1, keepdim=True)\n            pts_x_mean = pts_x.mean(dim=1, keepdim=True)\n            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)\n            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)\n            moment_transfer = (self.moment_transfer * self.moment_mul) + (\n                self.moment_transfer.detach() * (1 - self.moment_mul))\n            moment_width_transfer = moment_transfer[0]\n            moment_height_transfer = moment_transfer[1]\n            half_width = pts_x_std * torch.exp(moment_width_transfer)\n            half_height = pts_y_std * torch.exp(moment_height_transfer)\n            bbox = torch.cat([\n                pts_x_mean - half_width, pts_y_mean - half_height,\n                pts_x_mean + half_width, pts_y_mean + half_height\n            ],\n                             dim=1)\n        else:\n            raise NotImplementedError\n        return bbox\n\n    def gen_grid_from_reg(self, reg: Tensor,\n                          previous_boxes: Tensor) -> Tuple[Tensor]:\n        \"\"\"Base on the previous bboxes and regression values, we compute the\n        regressed bboxes and generate the grids on the bboxes.\n\n        Args:\n            reg (Tensor): the regression value to previous bboxes.\n            previous_boxes (Tensor): previous bboxes.\n\n        Returns:\n            Tuple[Tensor]: generate grids on the regressed bboxes.\n        \"\"\"\n        b, _, h, w = reg.shape\n        bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.\n        bwh = (previous_boxes[:, 2:, ...] -\n               previous_boxes[:, :2, ...]).clamp(min=1e-6)\n        grid_topleft = bxy + bwh * reg[:, :2, ...] 
- 0.5 * bwh * torch.exp(\n            reg[:, 2:, ...])\n        grid_wh = bwh * torch.exp(reg[:, 2:, ...])\n        grid_left = grid_topleft[:, [0], ...]\n        grid_top = grid_topleft[:, [1], ...]\n        grid_width = grid_wh[:, [0], ...]\n        grid_height = grid_wh[:, [1], ...]\n        intervel = torch.linspace(0., 1., self.dcn_kernel).view(\n            1, self.dcn_kernel, 1, 1).type_as(reg)\n        grid_x = grid_left + grid_width * intervel\n        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)\n        grid_x = grid_x.view(b, -1, h, w)\n        grid_y = grid_top + grid_height * intervel\n        grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)\n        grid_y = grid_y.view(b, -1, h, w)\n        grid_yx = torch.stack([grid_y, grid_x], dim=2)\n        grid_yx = grid_yx.view(b, -1, h, w)\n        regressed_bbox = torch.cat([\n            grid_left, grid_top, grid_left + grid_width, grid_top + grid_height\n        ], 1)\n        return grid_yx, regressed_bbox\n\n    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:\n        return multi_apply(self.forward_single, feats)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward feature map of a single FPN level.\"\"\"\n        dcn_base_offset = self.dcn_base_offset.type_as(x)\n        # If we use center_init, the initial reppoints is from center points.\n        # If we use bounding bbox representation, the initial reppoints is\n        #   from regular grid placed on a pre-defined bbox.\n        if self.use_grid_points or not self.center_init:\n            scale = self.point_base_scale / 2\n            points_init = dcn_base_offset / dcn_base_offset.max() * scale\n            bbox_init = x.new_tensor([-scale, -scale, scale,\n                                      scale]).view(1, 4, 1, 1)\n        else:\n            points_init = 0\n        cls_feat = x\n        pts_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            pts_feat = reg_conv(pts_feat)\n        # initialize reppoints\n        pts_out_init = self.reppoints_pts_init_out(\n            self.relu(self.reppoints_pts_init_conv(pts_feat)))\n        if self.use_grid_points:\n            pts_out_init, bbox_out_init = self.gen_grid_from_reg(\n                pts_out_init, bbox_init.detach())\n        else:\n            pts_out_init = pts_out_init + points_init\n        # refine and classify reppoints\n        pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(\n        ) + self.gradient_mul * pts_out_init\n        dcn_offset = pts_out_init_grad_mul - dcn_base_offset\n        cls_out = self.reppoints_cls_out(\n            self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))\n        pts_out_refine = self.reppoints_pts_refine_out(\n            self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))\n        if self.use_grid_points:\n            pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(\n                pts_out_refine, bbox_out_init.detach())\n        else:\n            pts_out_refine = pts_out_refine + pts_out_init.detach()\n\n        if self.training:\n            return cls_out, pts_out_init, pts_out_refine\n        else:\n            return cls_out, self.points2bbox(pts_out_refine)\n\n    def get_points(self, featmap_sizes: List[Tuple[int]],\n                   batch_img_metas: List[dict], device: str) -> tuple:\n        \"\"\"Get points according to feature map sizes.\n\n 
       Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            batch_img_metas (list[dict]): Image meta info.\n\n        Returns:\n            tuple: points of each image, valid flags of each image\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # points center for one time\n        multi_level_points = self.prior_generator.grid_priors(\n            featmap_sizes, device=device, with_stride=True)\n        points_list = [[point.clone() for point in multi_level_points]\n                       for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level grids\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_flags = self.prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'], device=device)\n            valid_flag_list.append(multi_level_flags)\n\n        return points_list, valid_flag_list\n\n    def centers_to_bboxes(self, point_list: List[Tensor]) -> List[Tensor]:\n        \"\"\"Get bboxes according to center points.\n\n        Only used in :class:`MaxIoUAssigner`.\n        \"\"\"\n        bbox_list = []\n        for i_img, point in enumerate(point_list):\n            bbox = []\n            for i_lvl in range(len(self.point_strides)):\n                scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5\n                bbox_shift = torch.Tensor([-scale, -scale, scale,\n                                           scale]).view(1, 4).type_as(point[0])\n                bbox_center = torch.cat(\n                    [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)\n                bbox.append(bbox_center + bbox_shift)\n            bbox_list.append(bbox)\n        return bbox_list\n\n    def offset_to_pts(self, center_list: List[Tensor],\n                      pred_list: List[Tensor]) -> List[Tensor]:\n        \"\"\"Change from point offset to point coordinate.\"\"\"\n        pts_list = []\n        for i_lvl in range(len(self.point_strides)):\n            pts_lvl = []\n            for i_img in range(len(center_list)):\n                pts_center = center_list[i_img][i_lvl][:, :2].repeat(\n                    1, self.num_points)\n                pts_shift = pred_list[i_lvl][i_img]\n                yx_pts_shift = pts_shift.permute(1, 2, 0).view(\n                    -1, 2 * self.num_points)\n                y_pts_shift = yx_pts_shift[..., 0::2]\n                x_pts_shift = yx_pts_shift[..., 1::2]\n                xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)\n                xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)\n                pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center\n                pts_lvl.append(pts)\n            pts_lvl = torch.stack(pts_lvl, 0)\n            pts_list.append(pts_lvl)\n        return pts_list\n\n    def _get_targets_single(self,\n                            flat_proposals: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            gt_instances_ignore: InstanceData,\n                            stage: str = 'init',\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute corresponding GT box and classification targets for\n        proposals.\n\n        Args:\n            flat_proposals (Tensor): Multi level points of a image.\n           
 valid_flags (Tensor): Multi level valid flags of a image.\n            gt_instances (InstanceData): It usually includes ``bboxes`` and\n                ``labels`` attributes.\n            gt_instances_ignore (InstanceData): It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n            stage (str): 'init' or 'refine'. Generate target for\n                init stage or refine stage. Defaults to 'init'.\n            unmap_outputs (bool): Whether to map outputs back to\n                the original set of anchors. Defaults to True.\n\n        Returns:\n            tuple:\n\n                - labels (Tensor): Labels of each level.\n                - label_weights (Tensor): Label weights of each level.\n                - bbox_targets (Tensor): BBox targets of each level.\n                - bbox_weights (Tensor): BBox weights of each level.\n                - pos_inds (Tensor): positive samples indexes.\n                - neg_inds (Tensor): negative samples indexes.\n                - sampling_result (:obj:`SamplingResult`): Sampling results.\n        \"\"\"\n        inside_flags = valid_flags\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid proposal inside the image boundary. Please '\n                'check the image size.')\n        # assign gt and sample proposals\n        proposals = flat_proposals[inside_flags, :]\n        pred_instances = InstanceData(priors=proposals)\n\n        if stage == 'init':\n            assigner = self.init_assigner\n            pos_weight = self.train_cfg['init']['pos_weight']\n        else:\n            assigner = self.refine_assigner\n            pos_weight = self.train_cfg['refine']['pos_weight']\n\n        assign_result = assigner.assign(pred_instances, gt_instances,\n                                        gt_instances_ignore)\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_proposals = proposals.shape[0]\n        bbox_gt = proposals.new_zeros([num_valid_proposals, 4])\n        pos_proposals = torch.zeros_like(proposals)\n        proposals_weights = proposals.new_zeros([num_valid_proposals, 4])\n        labels = proposals.new_full((num_valid_proposals, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        label_weights = proposals.new_zeros(\n            num_valid_proposals, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            bbox_gt[pos_inds, :] = sampling_result.pos_gt_bboxes\n            pos_proposals[pos_inds, :] = proposals[pos_inds, :]\n            proposals_weights[pos_inds, :] = 1.0\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of proposals\n        if unmap_outputs:\n            num_total_proposals = flat_proposals.size(0)\n            labels = unmap(\n                labels,\n                num_total_proposals,\n                inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_proposals,\n                        
          inside_flags)\n            bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)\n            pos_proposals = unmap(pos_proposals, num_total_proposals,\n                                  inside_flags)\n            proposals_weights = unmap(proposals_weights, num_total_proposals,\n                                      inside_flags)\n\n        return (labels, label_weights, bbox_gt, pos_proposals,\n                proposals_weights, pos_inds, neg_inds, sampling_result)\n\n    def get_targets(self,\n                    proposals_list: List[Tensor],\n                    valid_flag_list: List[Tensor],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    stage: str = 'init',\n                    unmap_outputs: bool = True,\n                    return_sampling_results: bool = False) -> tuple:\n        \"\"\"Compute corresponding GT box and classification targets for\n        proposals.\n\n        Args:\n            proposals_list (list[Tensor]): Multi level points/bboxes of each\n                image.\n            valid_flag_list (list[Tensor]): Multi level valid flags of each\n                image.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            stage (str): 'init' or 'refine'. Generate target for init stage or\n                refine stage.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n            return_sampling_results (bool): Whether to return the sampling\n                results. Defaults to False.\n\n        Returns:\n            tuple:\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.\n                - proposals_list (list[Tensor]): Proposals(points/bboxes) of\n                  each level.\n                - proposal_weights_list (list[Tensor]): Proposal weights of\n                  each level.\n                - avg_factor (int): Average factor that is used to average\n                  the loss. When using sampling method, avg_factor is usually\n                  the sum of positive and negative priors. 
When using\n                  `PseudoSampler`, `avg_factor` is usually equal to the number\n                  of positive priors.\n        \"\"\"\n        assert stage in ['init', 'refine']\n        num_imgs = len(batch_img_metas)\n        assert len(proposals_list) == len(valid_flag_list) == num_imgs\n\n        # points number of multi levels\n        num_level_proposals = [points.size(0) for points in proposals_list[0]]\n\n        # concat all level points and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(proposals_list[i]) == len(valid_flag_list[i])\n            proposals_list[i] = torch.cat(proposals_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n\n        (all_labels, all_label_weights, all_bbox_gt, all_proposals,\n         all_proposal_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             proposals_list,\n             valid_flag_list,\n             batch_gt_instances,\n             batch_gt_instances_ignore,\n             stage=stage,\n             unmap_outputs=unmap_outputs)\n\n        # sampled points of all images\n        avg_refactor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        labels_list = images_to_levels(all_labels, num_level_proposals)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_proposals)\n        bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)\n        proposals_list = images_to_levels(all_proposals, num_level_proposals)\n        proposal_weights_list = images_to_levels(all_proposal_weights,\n                                                 num_level_proposals)\n        res = (labels_list, label_weights_list, bbox_gt_list, proposals_list,\n               proposal_weights_list, avg_refactor)\n        if return_sampling_results:\n            res = res + (sampling_results_list, )\n\n        return res\n\n    def loss_by_feat_single(self, cls_score: Tensor, pts_pred_init: Tensor,\n                            pts_pred_refine: Tensor, labels: Tensor,\n                            label_weights, bbox_gt_init: Tensor,\n                            bbox_weights_init: Tensor, bbox_gt_refine: Tensor,\n                            bbox_weights_refine: Tensor, stride: int,\n                            avg_factor_init: int,\n                            avg_factor_refine: int) -> Tuple[Tensor]:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_classes, h_i, w_i).\n            pts_pred_init (Tensor): Points of shape\n                (batch_size, h_i * w_i, num_points * 2).\n            pts_pred_refine (Tensor): Points refined of shape\n                (batch_size, h_i * w_i, num_points * 2).\n            labels (Tensor): Ground truth class indices with shape\n                (batch_size, h_i * w_i).\n            label_weights (Tensor): Label weights of shape\n                (batch_size, h_i * w_i).\n            bbox_gt_init (Tensor): BBox regression targets in the init stage\n                of shape (batch_size, h_i * w_i, 4).\n            bbox_weights_init (Tensor): BBox regression loss weights in the\n       
         init stage of shape (batch_size, h_i * w_i, 4).\n            bbox_gt_refine (Tensor): BBox regression targets in the refine\n                stage of shape (batch_size, h_i * w_i, 4).\n            bbox_weights_refine (Tensor): BBox regression loss weights in the\n                refine stage of shape (batch_size, h_i * w_i, 4).\n            stride (int): Point stride.\n            avg_factor_init (int): Average factor that is used to average\n                the loss in the init stage.\n            avg_factor_refine (int): Average factor that is used to average\n                the loss in the refine stage.\n\n        Returns:\n            Tuple[Tensor]: loss components.\n        \"\"\"\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        cls_score = cls_score.contiguous()\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=avg_factor_refine)\n\n        # points loss\n        bbox_gt_init = bbox_gt_init.reshape(-1, 4)\n        bbox_weights_init = bbox_weights_init.reshape(-1, 4)\n        bbox_pred_init = self.points2bbox(\n            pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)\n        bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)\n        bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)\n        bbox_pred_refine = self.points2bbox(\n            pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)\n        normalize_term = self.point_base_scale * stride\n        loss_pts_init = self.loss_bbox_init(\n            bbox_pred_init / normalize_term,\n            bbox_gt_init / normalize_term,\n            bbox_weights_init,\n            avg_factor=avg_factor_init)\n        loss_pts_refine = self.loss_bbox_refine(\n            bbox_pred_refine / normalize_term,\n            bbox_gt_refine / normalize_term,\n            bbox_weights_refine,\n            avg_factor=avg_factor_refine)\n        return loss_cls, loss_pts_init, loss_pts_refine\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        pts_preds_init: List[Tensor],\n        pts_preds_refine: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, Tensor]:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, of shape (batch_size, num_classes, h, w).\n            pts_preds_init (list[Tensor]): Points for each scale level, each is\n                a 3D-tensor, of shape (batch_size, h_i * w_i, num_points * 2).\n            pts_preds_refine (list[Tensor]): Points refined for each scale\n                level, each is a 3D-tensor, of shape\n                (batch_size, h_i * w_i, num_points * 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        device = cls_scores[0].device\n\n        # target for initial stage\n        center_list, valid_flag_list = self.get_points(featmap_sizes,\n                                                       batch_img_metas, device)\n        pts_coordinate_preds_init = self.offset_to_pts(center_list,\n                                                       pts_preds_init)\n        if self.train_cfg['init']['assigner']['type'] == 'PointAssigner':\n            # Assign target for center list\n            candidate_list = center_list\n        else:\n            # transform center list to bbox list and\n            #   assign target for bbox list\n            bbox_list = self.centers_to_bboxes(center_list)\n            candidate_list = bbox_list\n        cls_reg_targets_init = self.get_targets(\n            proposals_list=candidate_list,\n            valid_flag_list=valid_flag_list,\n            batch_gt_instances=batch_gt_instances,\n            batch_img_metas=batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            stage='init',\n            return_sampling_results=False)\n        (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,\n         avg_factor_init) = cls_reg_targets_init\n\n        # target for refinement stage\n        center_list, valid_flag_list = self.get_points(featmap_sizes,\n                                                       batch_img_metas, device)\n        pts_coordinate_preds_refine = self.offset_to_pts(\n            center_list, pts_preds_refine)\n        bbox_list = []\n        for i_img, center in enumerate(center_list):\n            bbox = []\n            for i_lvl in range(len(pts_preds_refine)):\n                bbox_preds_init = self.points2bbox(\n                    pts_preds_init[i_lvl].detach())\n                bbox_shift = bbox_preds_init * self.point_strides[i_lvl]\n                bbox_center = torch.cat(\n                    [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)\n                bbox.append(bbox_center +\n                            bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))\n            bbox_list.append(bbox)\n        cls_reg_targets_refine = self.get_targets(\n            proposals_list=bbox_list,\n            valid_flag_list=valid_flag_list,\n            batch_gt_instances=batch_gt_instances,\n            batch_img_metas=batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            stage='refine',\n            return_sampling_results=False)\n        (labels_list, label_weights_list, bbox_gt_list_refine,\n         candidate_list_refine, bbox_weights_list_refine,\n         avg_factor_refine) = cls_reg_targets_refine\n\n        # compute loss\n        losses_cls, losses_pts_init, losses_pts_refine = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            pts_coordinate_preds_init,\n            pts_coordinate_preds_refine,\n            labels_list,\n            label_weights_list,\n            bbox_gt_list_init,\n            bbox_weights_list_init,\n            bbox_gt_list_refine,\n            bbox_weights_list_refine,\n            self.point_strides,\n            avg_factor_init=avg_factor_init,\n            
avg_factor_refine=avg_factor_refine)\n        loss_dict_all = {\n            'loss_cls': losses_cls,\n            'loss_pts_init': losses_pts_init,\n            'loss_pts_refine': losses_pts_refine\n        }\n        return loss_dict_all\n\n    # Same as base_dense_head/_get_bboxes_single except self._bbox_decode\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. RepPoints head does not need\n                this value.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 2).\n            img_meta (dict): Image meta info.\n            cfg (:obj:`ConfigDict`): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_score_list) == len(bbox_pred_list)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, priors) in enumerate(\n                zip(cls_score_list, bbox_pred_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under 
the same `nms_pre`.\n            # There is no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self._bbox_decode(priors, bbox_pred,\n                                       self.point_strides[level_idx],\n                                       img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def _bbox_decode(self, points: Tensor, bbox_pred: Tensor, stride: int,\n                     max_shape: Tuple[int, int]) -> Tensor:\n        \"\"\"Decode the prediction to bounding box.\n\n        Args:\n            points (Tensor): shape (h_i * w_i, 2).\n            bbox_pred (Tensor): shape (h_i * w_i, 4).\n            stride (int): Stride for bbox_pred in different level.\n            max_shape (Tuple[int, int]): image shape.\n\n        Returns:\n            Tensor: Bounding boxes decoded.\n        \"\"\"\n        bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)\n        bboxes = bbox_pred * stride + bbox_pos_center\n        x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])\n        y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])\n        x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])\n        y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])\n        decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n        return decoded_bboxes\n"
  },
  {
    "path": "mmdet/models/dense_heads/retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.registry import MODELS\nfrom .anchor_head import AnchorHead\n\n\n@MODELS.register_module()\nclass RetinaHead(AnchorHead):\n    r\"\"\"An anchor-based head used in `RetinaNet\n    <https://arxiv.org/pdf/1708.02002.pdf>`_.\n\n    The head contains two subnetworks. The first classifies anchor boxes and\n    the second regresses deltas for the anchors.\n\n    Example:\n        >>> import torch\n        >>> self = RetinaHead(11, 7)\n        >>> x = torch.rand(1, 7, 32, 32)\n        >>> cls_score, bbox_pred = self.forward_single(x)\n        >>> # Each anchor predicts a score for each class except background\n        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors\n        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors\n        >>> assert cls_per_anchor == (self.num_classes)\n        >>> assert box_per_anchor == 4\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=4,\n                     scales_per_octave=3,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='retina_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        assert stacked_convs >= 0, \\\n            '`stacked_convs` must be non-negative integers, ' \\\n            f'but got {stacked_convs} instead.'\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super(RetinaHead, self).__init__(\n            num_classes,\n            in_channels,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        in_channels = self.in_channels\n        for i in range(self.stacked_convs):\n            self.cls_convs.append(\n                ConvModule(\n                    in_channels,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    in_channels,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            in_channels = self.feat_channels\n        self.retina_cls = nn.Conv2d(\n            in_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        reg_dim = self.bbox_coder.encode_size\n        self.retina_reg = nn.Conv2d(\n            in_channels, 
self.num_base_priors * reg_dim, 3, padding=1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level\n                    the channels number is num_anchors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale\n                    level, the channels number is num_anchors * 4.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.retina_cls(cls_feat)\n        bbox_pred = self.retina_reg(reg_feat)\n        return cls_score, bbox_pred\n"
  },
  {
    "path": "mmdet/models/dense_heads/retina_sepbn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import bias_init_with_prob, normal_init\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .anchor_head import AnchorHead\n\n\n@MODELS.register_module()\nclass RetinaSepBNHead(AnchorHead):\n    \"\"\"\"RetinaHead with separate BN.\n\n    In RetinaHead, conv/norm layers are shared across different FPN levels,\n    while in RetinaSepBNHead, conv layers are shared across different FPN\n    levels, but BN layers are separated.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 num_ins: int,\n                 in_channels: int,\n                 stacked_convs: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.num_ins = num_ins\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.num_ins):\n            cls_convs = nn.ModuleList()\n            reg_convs = nn.ModuleList()\n            for j in range(self.stacked_convs):\n                chn = self.in_channels if j == 0 else self.feat_channels\n                cls_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n                reg_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n            self.cls_convs.append(cls_convs)\n            self.reg_convs.append(reg_convs)\n        for i in range(self.stacked_convs):\n            for j in range(1, self.num_ins):\n                self.cls_convs[j][i].conv = self.cls_convs[0][i].conv\n                self.reg_convs[j][i].conv = self.reg_convs[0][i].conv\n        self.retina_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.retina_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        super().init_weights()\n        for m in self.cls_convs[0]:\n            normal_init(m.conv, std=0.01)\n        for m in self.reg_convs[0]:\n            normal_init(m.conv, std=0.01)\n        bias_cls = 
bias_init_with_prob(0.01)\n        normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n        normal_init(self.retina_reg, std=0.01)\n\n    def forward(self, feats: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n\n                - cls_scores (list[Tensor]): Classification scores for all\n                  scale levels, each is a 4D-tensor, the channels number is\n                  num_anchors * num_classes.\n                - bbox_preds (list[Tensor]): Box energies / deltas for all\n                  scale levels, each is a 4D-tensor, the channels number is\n                  num_anchors * 4.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for i, x in enumerate(feats):\n            cls_feat = feats[i]\n            reg_feat = feats[i]\n            for cls_conv in self.cls_convs[i]:\n                cls_feat = cls_conv(cls_feat)\n            for reg_conv in self.reg_convs[i]:\n                reg_feat = reg_conv(reg_feat)\n            cls_score = self.retina_cls(cls_feat)\n            bbox_pred = self.retina_reg(reg_feat)\n            cls_scores.append(cls_score)\n            bbox_preds.append(bbox_pred)\n        return cls_scores, bbox_preds\n"
  },
  {
    "path": "mmdet/models/dense_heads/rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import batched_nms\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import (cat_boxes, empty_box_as, get_box_tensor,\n                                   get_box_wh, scale_boxes)\nfrom mmdet.utils import InstanceList, MultiConfig, OptInstanceList\nfrom .anchor_head import AnchorHead\n\n\n@MODELS.register_module()\nclass RPNHead(AnchorHead):\n    \"\"\"Implementation of RPN head.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        num_classes (int): Number of categories excluding the background\n            category. Defaults to 1.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \\\n            list[dict]): Initialization config dict.\n        num_convs (int): Number of convolution layers in the head.\n            Defaults to 1.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 in_channels: int,\n                 num_classes: int = 1,\n                 init_cfg: MultiConfig = dict(\n                     type='Normal', layer='Conv2d', std=0.01),\n                 num_convs: int = 1,\n                 **kwargs) -> None:\n        self.num_convs = num_convs\n        assert num_classes == 1\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        if self.num_convs > 1:\n            rpn_convs = []\n            for i in range(self.num_convs):\n                if i == 0:\n                    in_channels = self.in_channels\n                else:\n                    in_channels = self.feat_channels\n                # use ``inplace=False`` to avoid error: one of the variables\n                # needed for gradient computation has been modified by an\n                # inplace operation.\n                rpn_convs.append(\n                    ConvModule(\n                        in_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        inplace=False))\n            self.rpn_conv = nn.Sequential(*rpn_convs)\n        else:\n            self.rpn_conv = nn.Conv2d(\n                self.in_channels, self.feat_channels, 3, padding=1)\n        self.rpn_cls = nn.Conv2d(self.feat_channels,\n                                 self.num_base_priors * self.cls_out_channels,\n                                 1)\n        reg_dim = self.bbox_coder.encode_size\n        self.rpn_reg = nn.Conv2d(self.feat_channels,\n                                 self.num_base_priors * reg_dim, 1)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level \\\n                    the channels number is num_base_priors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale \\\n                    level, the channels 
number is num_base_priors * 4.\n        \"\"\"\n        x = self.rpn_conv(x)\n        x = F.relu(x)\n        rpn_cls_score = self.rpn_cls(x)\n        rpn_bbox_pred = self.rpn_reg(x)\n        return rpn_cls_score, rpn_bbox_pred\n\n    def loss_by_feat(self,\n                     cls_scores: List[Tensor],\n                     bbox_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict],\n                     batch_gt_instances_ignore: OptInstanceList = None) \\\n            -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[obj:InstanceData]): Batch of gt_instance.\n                It usually includes ``bboxes`` and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[obj:InstanceData], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        losses = super().loss_by_feat(\n            cls_scores,\n            bbox_preds,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        return dict(\n            loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Be compatible with\n                BaseDenseHead. Not used in RPNHead.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). 
In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (ConfigDict, optional): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_valid_priors = []\n        mlvl_scores = []\n        level_ids = []\n        for level_idx, (cls_score, bbox_pred, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            reg_dim = self.bbox_coder.encode_size\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, reg_dim)\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                # remind that we set FG labels to [0] since mmdet v2.0\n                # BG cat_id: 1\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            scores = torch.squeeze(scores)\n            if 0 < nms_pre < scores.shape[0]:\n                # sort is faster than topk\n                # _, topk_inds = scores.topk(cfg.nms_pre)\n                ranked_scores, rank_inds = scores.sort(descending=True)\n                topk_inds = rank_inds[:nms_pre]\n                scores = ranked_scores[:nms_pre]\n                bbox_pred = bbox_pred[topk_inds, :]\n                priors = priors[topk_inds]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_valid_priors.append(priors)\n            mlvl_scores.append(scores)\n\n            # use level id to implement the separate level nms\n            level_ids.append(\n                scores.new_full((scores.size(0), ),\n                                level_idx,\n                                dtype=torch.long))\n\n        bbox_pred = torch.cat(mlvl_bbox_preds)\n        priors = cat_boxes(mlvl_valid_priors)\n        bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.scores = torch.cat(mlvl_scores)\n        results.level_ids = torch.cat(level_ids)\n\n        return self._bbox_post_process(\n            results=results, cfg=cfg, rescale=rescale, img_meta=img_meta)\n\n    def _bbox_post_process(self,\n                           results: InstanceData,\n                           cfg: ConfigDict,\n                           rescale: bool = False,\n        
                   with_nms: bool = True,\n                           img_meta: Optional[dict] = None) -> InstanceData:\n        \"\"\"bbox post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation.\n\n        Args:\n            results (:obj:`InstaceData`): Detection instance results,\n                each item has shape (num_bboxes, ).\n            cfg (ConfigDict): Test / postprocessing configuration.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default to True.\n            img_meta (dict, optional): Image meta info. Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert with_nms, '`with_nms` must be True in RPNHead'\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            results.bboxes = scale_boxes(results.bboxes, scale_factor)\n\n        # filter small size bboxes\n        if cfg.get('min_bbox_size', -1) >= 0:\n            w, h = get_box_wh(results.bboxes)\n            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n            if not valid_mask.all():\n                results = results[valid_mask]\n\n        if results.bboxes.numel() > 0:\n            bboxes = get_box_tensor(results.bboxes)\n            det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,\n                                                results.level_ids, cfg.nms)\n            results = results[keep_idxs]\n            # some nms would reweight the score, such as softnms\n            results.scores = det_bboxes[:, -1]\n            results = results[:cfg.max_per_img]\n            # TODO: This would unreasonably show the 0th class label\n            #  in visualization\n            results.labels = results.scores.new_zeros(\n                len(results), dtype=torch.long)\n            del results.level_ids\n        else:\n            # To avoid some potential error\n            results_ = InstanceData()\n            results_.bboxes = empty_box_as(results.bboxes)\n            results_.scores = results.scores.new_zeros(0)\n            results_.labels = results.scores.new_zeros(0)\n            results = results_\n        return results\n"
  },
  {
    "path": "mmdet/models/dense_heads/rtmdet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, Scale, is_norm\nfrom mmengine.model import bias_init_with_prob, constant_init, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import distance2bbox\nfrom mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean\nfrom ..layers.transformer import inverse_sigmoid\nfrom ..task_modules import anchor_inside_flags\nfrom ..utils import (images_to_levels, multi_apply, sigmoid_geometric_mean,\n                     unmap)\nfrom .atss_head import ATSSHead\n\n\n@MODELS.register_module()\nclass RTMDetHead(ATSSHead):\n    \"\"\"Detection Head of RTMDet.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        with_objectness (bool): Whether to add an objectness branch.\n            Defaults to True.\n        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.\n            Default: dict(type='ReLU')\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 with_objectness: bool = True,\n                 act_cfg: ConfigType = dict(type='ReLU'),\n                 **kwargs) -> None:\n        self.act_cfg = act_cfg\n        self.with_objectness = with_objectness\n        super().__init__(num_classes, in_channels, **kwargs)\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg))\n        pred_pad_size = self.pred_kernel_size // 2\n        self.rtm_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.rtm_reg = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * 4,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        if self.with_objectness:\n            self.rtm_obj = nn.Conv2d(\n                self.feat_channels,\n                1,\n                self.pred_kernel_size,\n                padding=pred_pad_size)\n\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def 
init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n        bias_cls = bias_init_with_prob(0.01)\n        normal_init(self.rtm_cls, std=0.01, bias=bias_cls)\n        normal_init(self.rtm_reg, std=0.01)\n        if self.with_objectness:\n            normal_init(self.rtm_obj, std=0.01, bias=bias_cls)\n\n    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n            - cls_scores (list[Tensor]): Classification scores for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * 4.\n        \"\"\"\n\n        cls_scores = []\n        bbox_preds = []\n        for idx, (x, scale, stride) in enumerate(\n                zip(feats, self.scales, self.prior_generator.strides)):\n            cls_feat = x\n            reg_feat = x\n\n            for cls_layer in self.cls_convs:\n                cls_feat = cls_layer(cls_feat)\n            cls_score = self.rtm_cls(cls_feat)\n\n            for reg_layer in self.reg_convs:\n                reg_feat = reg_layer(reg_feat)\n\n            if self.with_objectness:\n                objectness = self.rtm_obj(reg_feat)\n                cls_score = inverse_sigmoid(\n                    sigmoid_geometric_mean(cls_score, objectness))\n\n            reg_dist = scale(self.rtm_reg(reg_feat).exp()).float() * stride[0]\n\n            cls_scores.append(cls_score)\n            bbox_preds.append(reg_dist)\n        return tuple(cls_scores), tuple(bbox_preds)\n\n    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            labels: Tensor, label_weights: Tensor,\n                            bbox_targets: Tensor, assign_metrics: Tensor,\n                            stride: List[int]):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Decoded bboxes for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors).\n            bbox_targets (Tensor): BBox regression targets of each anchor with\n                shape (N, num_total_anchors, 4).\n            assign_metrics (Tensor): Assign metrics with shape\n                (N, num_total_anchors).\n            stride (List[int]): Downsample stride of the feature map.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, 
self.cls_out_channels).contiguous()\n        bbox_pred = bbox_pred.reshape(-1, 4)\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        assign_metrics = assign_metrics.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        targets = (labels, assign_metrics)\n\n        loss_cls = self.loss_cls(\n            cls_score, targets, label_weights, avg_factor=1.0)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n\n            pos_decode_bbox_pred = pos_bbox_pred\n            pos_decode_bbox_targets = pos_bbox_targets\n\n            # regression loss\n            pos_bbox_weight = assign_metrics[pos_inds]\n\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=pos_bbox_weight,\n                avg_factor=1.0)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            pos_bbox_weight = bbox_targets.new_tensor(0.)\n\n        return loss_cls, loss_bbox, assign_metrics.sum(), pos_bbox_weight.sum()\n\n    def loss_by_feat(self,\n                     cls_scores: List[Tensor],\n                     bbox_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict],\n                     batch_gt_instances_ignore: OptInstanceList = None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Decoded box for each scale\n                level with shape (N, num_anchors * 4, H, W) in\n                [tl_x, tl_y, br_x, br_y] format.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        flatten_cls_scores = torch.cat([\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ], 1)\n        decoded_bboxes = []\n        for anchor, bbox_pred in zip(anchor_list[0], bbox_preds):\n            anchor = anchor.reshape(-1, 4)\n            bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            bbox_pred = distance2bbox(anchor, bbox_pred)\n            decoded_bboxes.append(bbox_pred)\n\n        flatten_bboxes = torch.cat(decoded_bboxes, 1)\n\n        cls_reg_targets = self.get_targets(\n            flatten_cls_scores,\n            flatten_bboxes,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         assign_metrics_list, sampling_results_list) = cls_reg_targets\n\n        losses_cls, losses_bbox,\\\n            cls_avg_factors, bbox_avg_factors = multi_apply(\n                self.loss_by_feat_single,\n                cls_scores,\n                decoded_bboxes,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                assign_metrics_list,\n                self.prior_generator.strides)\n\n        cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()\n        losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))\n\n        bbox_avg_factor = reduce_mean(\n            sum(bbox_avg_factors)).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n    def get_targets(self,\n                    cls_scores: Tensor,\n                    bbox_preds: Tensor,\n                    anchor_list: List[List[Tensor]],\n                    valid_flag_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            cls_scores (Tensor): Classification predictions of images,\n                a 3D-Tensor with shape [num_imgs, num_priors, num_classes].\n            bbox_preds (Tensor): Decoded bboxes predictions of one image,\n                a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x,\n                tl_y, br_x, br_y] format.\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. Defaults to True.\n\n        Returns:\n            tuple: a tuple containing learning targets.\n\n            - anchors_list (list[list[Tensor]]): Anchors of each level.\n            - labels_list (list[Tensor]): Labels of each level.\n            - label_weights_list (list[Tensor]): Label weights of each\n              level.\n            - bbox_targets_list (list[Tensor]): BBox targets of each level.\n            - assign_metrics_list (list[Tensor]): alignment metrics of each\n              level.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        # anchor_list: list(b * [-1, 4])\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_assign_metrics, sampling_results_list) = multi_apply(\n             self._get_targets_single,\n             cls_scores.detach(),\n             bbox_preds.detach(),\n             anchor_list,\n             valid_flag_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        assign_metrics_list = images_to_levels(all_assign_metrics,\n                                               num_level_anchors)\n\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, assign_metrics_list, sampling_results_list)\n\n    def _get_targets_single(self,\n                            cls_scores: Tensor,\n                            bbox_preds: Tensor,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs=True):\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            cls_scores (list(Tensor)): Box scores for each image.\n            bbox_preds (list(Tensor)): Box energies / deltas for each image.\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. 
Defaults to True.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n\n            - anchors (Tensor): All anchors in the image with shape (N, 4).\n            - labels (Tensor): Labels of all anchors in the image with shape\n              (N,).\n            - label_weights (Tensor): Label weights of all anchors in the\n              image with shape (N,).\n            - bbox_targets (Tensor): BBox targets of all anchors in the\n              image with shape (N, 4).\n            - assign_metrics (Tensor): Alignment metrics of all priors in\n              the image with shape (N,).\n            - sampling_result (:obj:`SamplingResult`): Sampling result of\n              the image.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            return (None, ) * 6\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        pred_instances = InstanceData(\n            scores=cls_scores[inside_flags, :],\n            bboxes=bbox_preds[inside_flags, :],\n            priors=anchors)\n\n        assign_result = self.assigner.assign(pred_instances, gt_instances,\n                                             gt_instances_ignore)\n\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n        assign_metrics = anchors.new_zeros(\n            num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            # point-based\n            pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        class_assigned_gt_inds = torch.unique(\n            sampling_result.pos_assigned_gt_inds)\n        for gt_inds in class_assigned_gt_inds:\n            gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds ==\n                                     gt_inds]\n            assign_metrics[gt_class_inds] = assign_result.max_overlaps[\n                gt_class_inds]\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            assign_metrics = unmap(assign_metrics, num_total_anchors,\n                                   inside_flags)\n        return 
(anchors, labels, label_weights, bbox_targets, assign_metrics,\n                sampling_result)\n\n    def get_anchors(self,\n                    featmap_sizes: List[tuple],\n                    batch_img_metas: List[dict],\n                    device: Union[torch.device, str] = 'cuda') \\\n            -> Tuple[List[List[Tensor]], List[List[Tensor]]]:\n        \"\"\"Get anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            batch_img_metas (list[dict]): Image meta info.\n            device (torch.device or str): Device for returned tensors.\n                Defaults to cuda.\n\n        Returns:\n            tuple:\n\n            - anchor_list (list[list[Tensor]]): Anchors of each image.\n            - valid_flag_list (list[list[Tensor]]): Valid flags of each\n              image.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # anchors for one time\n        multi_level_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device, with_stride=True)\n        anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level anchors\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_flags = self.prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'], device)\n            valid_flag_list.append(multi_level_flags)\n        return anchor_list, valid_flag_list\n\n\n@MODELS.register_module()\nclass RTMDetSepBNHead(RTMDetHead):\n    \"\"\"RTMDetHead with separated BN layers and shared conv layers.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        share_conv (bool): Whether to share conv layers between stages.\n            Defaults to True.\n        use_depthwise (bool): Whether to use depthwise separable convolution in\n            head. Defaults to False.\n        norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization\n            layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer.\n            Defaults to dict(type='SiLU').\n        pred_kernel_size (int): Kernel size of prediction layer. 
Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 share_conv: bool = True,\n                 use_depthwise: bool = False,\n                 norm_cfg: ConfigType = dict(\n                     type='BN', momentum=0.03, eps=0.001),\n                 act_cfg: ConfigType = dict(type='SiLU'),\n                 pred_kernel_size: int = 1,\n                 exp_on_reg=False,\n                 **kwargs) -> None:\n        self.share_conv = share_conv\n        self.exp_on_reg = exp_on_reg\n        self.use_depthwise = use_depthwise\n        super().__init__(\n            num_classes,\n            in_channels,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg,\n            pred_kernel_size=pred_kernel_size,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        conv = DepthwiseSeparableConvModule \\\n            if self.use_depthwise else ConvModule\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n\n        self.rtm_cls = nn.ModuleList()\n        self.rtm_reg = nn.ModuleList()\n        if self.with_objectness:\n            self.rtm_obj = nn.ModuleList()\n        for n in range(len(self.prior_generator.strides)):\n            cls_convs = nn.ModuleList()\n            reg_convs = nn.ModuleList()\n            for i in range(self.stacked_convs):\n                chn = self.in_channels if i == 0 else self.feat_channels\n                cls_convs.append(\n                    conv(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_convs.append(\n                    conv(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n            self.cls_convs.append(cls_convs)\n            self.reg_convs.append(reg_convs)\n\n            self.rtm_cls.append(\n                nn.Conv2d(\n                    self.feat_channels,\n                    self.num_base_priors * self.cls_out_channels,\n                    self.pred_kernel_size,\n                    padding=self.pred_kernel_size // 2))\n            self.rtm_reg.append(\n                nn.Conv2d(\n                    self.feat_channels,\n                    self.num_base_priors * 4,\n                    self.pred_kernel_size,\n                    padding=self.pred_kernel_size // 2))\n            if self.with_objectness:\n                self.rtm_obj.append(\n                    nn.Conv2d(\n                        self.feat_channels,\n                        1,\n                        self.pred_kernel_size,\n                        padding=self.pred_kernel_size // 2))\n\n        if self.share_conv:\n            for n in range(len(self.prior_generator.strides)):\n                for i in range(self.stacked_convs):\n                    self.cls_convs[n][i].conv = self.cls_convs[0][i].conv\n                    self.reg_convs[n][i].conv = self.reg_convs[0][i].conv\n\n    def init_weights(self) -> None:\n        
\"\"\"Initialize weights of the head.\"\"\"\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n        bias_cls = bias_init_with_prob(0.01)\n        for rtm_cls, rtm_reg in zip(self.rtm_cls, self.rtm_reg):\n            normal_init(rtm_cls, std=0.01, bias=bias_cls)\n            normal_init(rtm_reg, std=0.01)\n        if self.with_objectness:\n            for rtm_obj in self.rtm_obj:\n                normal_init(rtm_obj, std=0.01, bias=bias_cls)\n\n    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n\n            - cls_scores (tuple[Tensor]): Classification scores for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_anchors * num_classes.\n            - bbox_preds (tuple[Tensor]): Box energies / deltas for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_anchors * 4.\n        \"\"\"\n\n        cls_scores = []\n        bbox_preds = []\n        for idx, (x, stride) in enumerate(\n                zip(feats, self.prior_generator.strides)):\n            cls_feat = x\n            reg_feat = x\n\n            for cls_layer in self.cls_convs[idx]:\n                cls_feat = cls_layer(cls_feat)\n            cls_score = self.rtm_cls[idx](cls_feat)\n\n            for reg_layer in self.reg_convs[idx]:\n                reg_feat = reg_layer(reg_feat)\n\n            if self.with_objectness:\n                objectness = self.rtm_obj[idx](reg_feat)\n                cls_score = inverse_sigmoid(\n                    sigmoid_geometric_mean(cls_score, objectness))\n            if self.exp_on_reg:\n                reg_dist = self.rtm_reg[idx](reg_feat).exp() * stride[0]\n            else:\n                reg_dist = self.rtm_reg[idx](reg_feat) * stride[0]\n            cls_scores.append(cls_score)\n            bbox_preds.append(reg_dist)\n        return tuple(cls_scores), tuple(bbox_preds)\n"
  },
  {
    "path": "mmdet/models/dense_heads/rtmdet_ins_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport math\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, is_norm\nfrom mmcv.ops import batched_nms\nfrom mmengine.model import (BaseModule, bias_init_with_prob, constant_init,\n                            normal_init)\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.layers.transformer import inverse_sigmoid\nfrom mmdet.models.utils import (filter_scores_and_topk, multi_apply,\n                                select_single_mlvl, sigmoid_geometric_mean)\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import (cat_boxes, distance2bbox, get_box_tensor,\n                                   get_box_wh, scale_boxes)\nfrom mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean\nfrom .rtmdet_head import RTMDetHead\n\n\n@MODELS.register_module()\nclass RTMDetInsHead(RTMDetHead):\n    \"\"\"Detection Head of RTMDet-Ins.\n\n    Args:\n        num_prototypes (int): Number of mask prototype features extracted\n            from the mask head. Defaults to 8.\n        dyconv_channels (int): Channel of the dynamic conv layers.\n            Defaults to 8.\n        num_dyconvs (int): Number of the dynamic convolution layers.\n            Defaults to 3.\n        mask_loss_stride (int): Down sample stride of the masks for loss\n            computation. Defaults to 4.\n        loss_mask (:obj:`ConfigDict` or dict): Config dict for mask loss.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 num_prototypes: int = 8,\n                 dyconv_channels: int = 8,\n                 num_dyconvs: int = 3,\n                 mask_loss_stride: int = 4,\n                 loss_mask=dict(\n                     type='DiceLoss',\n                     loss_weight=2.0,\n                     eps=5e-6,\n                     reduction='mean'),\n                 **kwargs) -> None:\n        self.num_prototypes = num_prototypes\n        self.num_dyconvs = num_dyconvs\n        self.dyconv_channels = dyconv_channels\n        self.mask_loss_stride = mask_loss_stride\n        super().__init__(*args, **kwargs)\n        self.loss_mask = MODELS.build(loss_mask)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        super()._init_layers()\n        # a branch to predict kernels of dynamic convs\n        self.kernel_convs = nn.ModuleList()\n        # calculate num dynamic parameters\n        weight_nums, bias_nums = [], []\n        for i in range(self.num_dyconvs):\n            if i == 0:\n                weight_nums.append(\n                    # mask prototype and coordinate features\n                    (self.num_prototypes + 2) * self.dyconv_channels)\n                bias_nums.append(self.dyconv_channels * 1)\n            elif i == self.num_dyconvs - 1:\n                weight_nums.append(self.dyconv_channels * 1)\n                bias_nums.append(1)\n            else:\n                weight_nums.append(self.dyconv_channels * self.dyconv_channels)\n                bias_nums.append(self.dyconv_channels * 1)\n        self.weight_nums = weight_nums\n        self.bias_nums = bias_nums\n        self.num_gen_params = sum(weight_nums) + sum(bias_nums)\n\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.kernel_convs.append(\n               
 ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg))\n        pred_pad_size = self.pred_kernel_size // 2\n        self.rtm_kernel = nn.Conv2d(\n            self.feat_channels,\n            self.num_gen_params,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.mask_head = MaskFeatModule(\n            in_channels=self.in_channels,\n            feat_channels=self.feat_channels,\n            stacked_convs=4,\n            num_levels=len(self.prior_generator.strides),\n            num_prototypes=self.num_prototypes,\n            act_cfg=self.act_cfg,\n            norm_cfg=self.norm_cfg)\n\n    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n            - cls_scores (list[Tensor]): Classification scores for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * 4.\n            - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_gen_params.\n            - mask_feat (Tensor): Output feature of the mask head. 
Each is a\n              4D-tensor, the channels number is num_prototypes.\n        \"\"\"\n        mask_feat = self.mask_head(feats)\n\n        cls_scores = []\n        bbox_preds = []\n        kernel_preds = []\n        for idx, (x, scale, stride) in enumerate(\n                zip(feats, self.scales, self.prior_generator.strides)):\n            cls_feat = x\n            reg_feat = x\n            kernel_feat = x\n\n            for cls_layer in self.cls_convs:\n                cls_feat = cls_layer(cls_feat)\n            cls_score = self.rtm_cls(cls_feat)\n\n            for kernel_layer in self.kernel_convs:\n                kernel_feat = kernel_layer(kernel_feat)\n            kernel_pred = self.rtm_kernel(kernel_feat)\n\n            for reg_layer in self.reg_convs:\n                reg_feat = reg_layer(reg_feat)\n\n            if self.with_objectness:\n                objectness = self.rtm_obj(reg_feat)\n                cls_score = inverse_sigmoid(\n                    sigmoid_geometric_mean(cls_score, objectness))\n\n            reg_dist = scale(self.rtm_reg(reg_feat)) * stride[0]\n\n            cls_scores.append(cls_score)\n            bbox_preds.append(reg_dist)\n            kernel_preds.append(kernel_pred)\n        return tuple(cls_scores), tuple(bbox_preds), tuple(\n            kernel_preds), mask_feat\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        kernel_preds: List[Tensor],\n                        mask_feat: Tensor,\n                        score_factors: Optional[List[Tensor]] = None,\n                        batch_img_metas: Optional[List[dict]] = None,\n                        cfg: Optional[ConfigType] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Note: When score_factors is not None, the cls_scores are\n        usually multiplied by it then obtain the real score used in NMS,\n        such as CenterNess in FCOS, IoU branch in ATSS.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            kernel_preds (list[Tensor]): Kernel predictions of dynamic\n                convs for all scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_params, H, W).\n            mask_feat (Tensor): Mask prototype features extracted from the\n                mask head, has shape (batch_size, num_prototypes, H, W).\n            score_factors (list[Tensor], optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 1, H, W). 
Defaults to None.\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, h, w).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        if score_factors is None:\n            # e.g. Retina, FreeAnchor, Foveabox, etc.\n            with_score_factors = False\n        else:\n            # e.g. FCOS, PAA, ATSS, AutoAssign, etc.\n            with_score_factors = True\n            assert len(cls_scores) == len(score_factors)\n\n        num_levels = len(cls_scores)\n\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device,\n            with_stride=True)\n\n        result_list = []\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            cls_score_list = select_single_mlvl(\n                cls_scores, img_id, detach=True)\n            bbox_pred_list = select_single_mlvl(\n                bbox_preds, img_id, detach=True)\n            kernel_pred_list = select_single_mlvl(\n                kernel_preds, img_id, detach=True)\n            if with_score_factors:\n                score_factor_list = select_single_mlvl(\n                    score_factors, img_id, detach=True)\n            else:\n                score_factor_list = [None for _ in range(num_levels)]\n\n            results = self._predict_by_feat_single(\n                cls_score_list=cls_score_list,\n                bbox_pred_list=bbox_pred_list,\n                kernel_pred_list=kernel_pred_list,\n                mask_feat=mask_feat[img_id],\n                score_factor_list=score_factor_list,\n                mlvl_priors=mlvl_priors,\n                img_meta=img_meta,\n                cfg=cfg,\n                rescale=rescale,\n                with_nms=with_nms)\n            result_list.append(results)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                kernel_pred_list: List[Tensor],\n                                mask_feat: Tensor,\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigType,\n                                
rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox and mask results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            kernel_preds (list[Tensor]): Kernel predictions of dynamic\n                convs for all scale levels of a single image, each is a\n                4D-tensor, has shape (num_params, H, W).\n            mask_feat (Tensor): Mask prototype features of a single image\n                extracted from the mask head, has shape (num_prototypes, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, h, w).\n        \"\"\"\n        if score_factor_list[0] is None:\n            # e.g. Retina, FreeAnchor, etc.\n            with_score_factors = False\n        else:\n            # e.g. 
FCOS, PAA, ATSS, etc.\n            with_score_factors = True\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_kernels = []\n        mlvl_valid_priors = []\n        mlvl_scores = []\n        mlvl_labels = []\n        if with_score_factors:\n            mlvl_score_factors = []\n        else:\n            mlvl_score_factors = None\n\n        for level_idx, (cls_score, bbox_pred, kernel_pred,\n                        score_factor, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list, kernel_pred_list,\n                              score_factor_list, mlvl_priors)):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            dim = self.bbox_coder.encode_size\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)\n            if with_score_factors:\n                score_factor = score_factor.permute(1, 2,\n                                                    0).reshape(-1).sigmoid()\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            kernel_pred = kernel_pred.permute(1, 2, 0).reshape(\n                -1, self.num_gen_params)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                # remind that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            score_thr = cfg.get('score_thr', 0)\n\n            results = filter_scores_and_topk(\n                scores, score_thr, nms_pre,\n                dict(\n                    bbox_pred=bbox_pred,\n                    priors=priors,\n                    kernel_pred=kernel_pred))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n            kernel_pred = filtered_results['kernel_pred']\n\n            if with_score_factors:\n                score_factor = score_factor[keep_idxs]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_valid_priors.append(priors)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n            mlvl_kernels.append(kernel_pred)\n\n            if with_score_factors:\n                mlvl_score_factors.append(score_factor)\n\n        bbox_pred = torch.cat(mlvl_bbox_preds)\n        priors = cat_boxes(mlvl_valid_priors)\n        bboxes = self.bbox_coder.decode(\n            priors[..., :2], bbox_pred, max_shape=img_shape)\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.priors = priors\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n        results.kernels = torch.cat(mlvl_kernels)\n        if with_score_factors:\n            results.score_factors = torch.cat(mlvl_score_factors)\n\n        return self._bbox_mask_post_process(\n            results=results,\n            mask_feat=mask_feat,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def _bbox_mask_post_process(\n            self,\n            results: InstanceData,\n            mask_feat,\n            cfg: ConfigType,\n            rescale: bool = False,\n            with_nms: bool = True,\n            img_meta: Optional[dict] = None) -> InstanceData:\n        \"\"\"bbox and mask post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation. Usually `with_nms` is False is used for aug test.\n\n        Args:\n            results (:obj:`InstaceData`): Detection instance results,\n                each item has shape (num_bboxes, ).\n            cfg (ConfigDict): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default to True.\n            img_meta (dict, optional): Image meta info. 
Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, h, w).\n        \"\"\"\n        stride = self.prior_generator.strides[0][0]\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            results.bboxes = scale_boxes(results.bboxes, scale_factor)\n\n        if hasattr(results, 'score_factors'):\n            # TODO： Add sqrt operation in order to be consistent with\n            #  the paper.\n            score_factors = results.pop('score_factors')\n            results.scores = results.scores * score_factors\n\n        # filter small size bboxes\n        if cfg.get('min_bbox_size', -1) >= 0:\n            w, h = get_box_wh(results.bboxes)\n            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n            if not valid_mask.all():\n                results = results[valid_mask]\n\n        # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg\n        assert with_nms, 'with_nms must be True for RTMDet-Ins'\n        if results.bboxes.numel() > 0:\n            bboxes = get_box_tensor(results.bboxes)\n            det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,\n                                                results.labels, cfg.nms)\n            results = results[keep_idxs]\n            # some nms would reweight the score, such as softnms\n            results.scores = det_bboxes[:, -1]\n            results = results[:cfg.max_per_img]\n\n            # process masks\n            mask_logits = self._mask_predict_by_feat_single(\n                mask_feat, results.kernels, results.priors)\n\n            mask_logits = F.interpolate(\n                mask_logits.unsqueeze(0), scale_factor=stride, mode='bilinear')\n            if rescale:\n                ori_h, ori_w = img_meta['ori_shape'][:2]\n                mask_logits = F.interpolate(\n                    mask_logits,\n                    size=[\n                        math.ceil(mask_logits.shape[-2] * scale_factor[0]),\n                        math.ceil(mask_logits.shape[-1] * scale_factor[1])\n                    ],\n                    mode='bilinear',\n                    align_corners=False)[..., :ori_h, :ori_w]\n            masks = mask_logits.sigmoid().squeeze(0)\n            masks = masks > cfg.mask_thr_binary\n            results.masks = masks\n        else:\n            h, w = img_meta['ori_shape'][:2] if rescale else img_meta[\n                'img_shape'][:2]\n            results.masks = torch.zeros(\n                size=(results.bboxes.shape[0], h, w),\n                dtype=torch.bool,\n                device=results.bboxes.device)\n\n        return results\n\n    def parse_dynamic_params(self, flatten_kernels: Tensor) -> tuple:\n        \"\"\"split kernel head prediction to conv weight and bias.\"\"\"\n        n_inst = flatten_kernels.size(0)\n        n_layers = len(self.weight_nums)\n        params_splits = list(\n            
torch.split_with_sizes(\n                flatten_kernels, self.weight_nums + self.bias_nums, dim=1))\n        weight_splits = params_splits[:n_layers]\n        bias_splits = params_splits[n_layers:]\n        for i in range(n_layers):\n            if i < n_layers - 1:\n                weight_splits[i] = weight_splits[i].reshape(\n                    n_inst * self.dyconv_channels, -1, 1, 1)\n                bias_splits[i] = bias_splits[i].reshape(n_inst *\n                                                        self.dyconv_channels)\n            else:\n                weight_splits[i] = weight_splits[i].reshape(n_inst, -1, 1, 1)\n                bias_splits[i] = bias_splits[i].reshape(n_inst)\n\n        return weight_splits, bias_splits\n\n    def _mask_predict_by_feat_single(self, mask_feat: Tensor, kernels: Tensor,\n                                     priors: Tensor) -> Tensor:\n        \"\"\"Generate mask logits from mask features with dynamic convs.\n\n        Args:\n            mask_feat (Tensor): Mask prototype features.\n                Has shape (num_prototypes, H, W).\n            kernels (Tensor): Kernel parameters for each instance.\n                Has shape (num_instance, num_params)\n            priors (Tensor): Center priors for each instance.\n                Has shape (num_instance, 4).\n\n        Returns:\n            Tensor: Instance segmentation masks for each instance.\n                Has shape (num_instance, H, W).\n        \"\"\"\n        num_inst = priors.shape[0]\n        h, w = mask_feat.size()[-2:]\n        if num_inst < 1:\n            return torch.empty(\n                size=(num_inst, h, w),\n                dtype=mask_feat.dtype,\n                device=mask_feat.device)\n        if len(mask_feat.shape) < 4:\n            mask_feat = mask_feat.unsqueeze(0)\n\n        coord = self.prior_generator.single_level_grid_priors(\n            (h, w), level_idx=0).reshape(1, -1, 2)\n        points = priors[:, :2].reshape(-1, 1, 2)\n        strides = priors[:, 2:].reshape(-1, 1, 2)\n        relative_coord = (points - coord).permute(0, 2, 1) / (\n            strides[..., 0].reshape(-1, 1, 1) * 8)\n        relative_coord = relative_coord.reshape(num_inst, 2, h, w)\n\n        mask_feat = torch.cat(\n            [relative_coord,\n             mask_feat.repeat(num_inst, 1, 1, 1)], dim=1)\n        weights, biases = self.parse_dynamic_params(kernels)\n\n        n_layers = len(weights)\n        x = mask_feat.reshape(1, -1, h, w)\n        for i, (weight, bias) in enumerate(zip(weights, biases)):\n            x = F.conv2d(\n                x, weight, bias=bias, stride=1, padding=0, groups=num_inst)\n            if i < n_layers - 1:\n                x = F.relu(x)\n        x = x.reshape(num_inst, h, w)\n        return x\n\n    def loss_mask_by_feat(self, mask_feats: Tensor, flatten_kernels: Tensor,\n                          sampling_results_list: list,\n                          batch_gt_instances: InstanceList) -> Tensor:\n        \"\"\"Compute instance segmentation loss.\n\n        Args:\n            mask_feats (Tensor): Mask prototype features extracted from\n                the mask head. 
Has shape (N, num_prototypes, H, W)\n            flatten_kernels (list[Tensor]): Kernels of the dynamic conv layers.\n                Has shape (N, num_instances, num_params)\n            sampling_results_list (list[:obj:`SamplingResults`]) Batch of\n                assignment results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            Tensor: The mask loss tensor.\n        \"\"\"\n        batch_pos_mask_logits = []\n        pos_gt_masks = []\n        for idx, (mask_feat, kernels, sampling_results,\n                  gt_instances) in enumerate(\n                      zip(mask_feats, flatten_kernels, sampling_results_list,\n                          batch_gt_instances)):\n            pos_priors = sampling_results.pos_priors\n            pos_inds = sampling_results.pos_inds\n            pos_kernels = kernels[pos_inds]  # n_pos, num_gen_params\n            pos_mask_logits = self._mask_predict_by_feat_single(\n                mask_feat, pos_kernels, pos_priors)\n            if gt_instances.masks.numel() == 0:\n                gt_masks = torch.empty_like(gt_instances.masks)\n            else:\n                gt_masks = gt_instances.masks[\n                    sampling_results.pos_assigned_gt_inds, :]\n            batch_pos_mask_logits.append(pos_mask_logits)\n            pos_gt_masks.append(gt_masks)\n\n        pos_gt_masks = torch.cat(pos_gt_masks, 0)\n        batch_pos_mask_logits = torch.cat(batch_pos_mask_logits, 0)\n\n        # avg_factor\n        num_pos = batch_pos_mask_logits.shape[0]\n        num_pos = reduce_mean(mask_feats.new_tensor([num_pos\n                                                     ])).clamp_(min=1).item()\n\n        if batch_pos_mask_logits.shape[0] == 0:\n            return mask_feats.sum() * 0\n\n        scale = self.prior_generator.strides[0][0] // self.mask_loss_stride\n        # upsample pred masks\n        batch_pos_mask_logits = F.interpolate(\n            batch_pos_mask_logits.unsqueeze(0),\n            scale_factor=scale,\n            mode='bilinear',\n            align_corners=False).squeeze(0)\n        # downsample gt masks\n        pos_gt_masks = pos_gt_masks[:, self.mask_loss_stride //\n                                    2::self.mask_loss_stride,\n                                    self.mask_loss_stride //\n                                    2::self.mask_loss_stride]\n\n        loss_mask = self.loss_mask(\n            batch_pos_mask_logits,\n            pos_gt_masks,\n            weight=None,\n            avg_factor=num_pos)\n\n        return loss_mask\n\n    def loss_by_feat(self,\n                     cls_scores: List[Tensor],\n                     bbox_preds: List[Tensor],\n                     kernel_preds: List[Tensor],\n                     mask_feat: Tensor,\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict],\n                     batch_gt_instances_ignore: OptInstanceList = None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Decoded box for each scale\n                level with shape (N, num_anchors * 4, H, W) in\n                [tl_x, tl_y, br_x, br_y] format.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                
gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        flatten_cls_scores = torch.cat([\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ], 1)\n        flatten_kernels = torch.cat([\n            kernel_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                    self.num_gen_params)\n            for kernel_pred in kernel_preds\n        ], 1)\n        decoded_bboxes = []\n        for anchor, bbox_pred in zip(anchor_list[0], bbox_preds):\n            anchor = anchor.reshape(-1, 4)\n            bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            bbox_pred = distance2bbox(anchor, bbox_pred)\n            decoded_bboxes.append(bbox_pred)\n\n        flatten_bboxes = torch.cat(decoded_bboxes, 1)\n        for gt_instances in batch_gt_instances:\n            gt_instances.masks = gt_instances.masks.to_tensor(\n                dtype=torch.bool, device=device)\n\n        cls_reg_targets = self.get_targets(\n            flatten_cls_scores,\n            flatten_bboxes,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         assign_metrics_list, sampling_results_list) = cls_reg_targets\n\n        losses_cls, losses_bbox,\\\n            cls_avg_factors, bbox_avg_factors = multi_apply(\n                self.loss_by_feat_single,\n                cls_scores,\n                decoded_bboxes,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                assign_metrics_list,\n                self.prior_generator.strides)\n\n        cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()\n        losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))\n\n        bbox_avg_factor = reduce_mean(\n            sum(bbox_avg_factors)).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n\n        loss_mask = self.loss_mask_by_feat(mask_feat, flatten_kernels,\n                                           sampling_results_list,\n                                           batch_gt_instances)\n        loss = dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_mask=loss_mask)\n        return loss\n\n\nclass MaskFeatModule(BaseModule):\n    \"\"\"Mask feature head used in RTMDet-Ins.\n\n    Args:\n        
in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels of the mask feature\n             map branch.\n        num_levels (int): The starting feature map level from RPN that\n             will be used to predict the mask feature map.\n        num_prototypes (int): Number of output channel of the mask feature\n             map branch. This is the channel count of the mask\n             feature map that to be dynamically convolved with the predicted\n             kernel.\n        stacked_convs (int): Number of convs in mask feature branch.\n        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.\n            Default: dict(type='ReLU', inplace=True)\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        feat_channels: int = 256,\n        stacked_convs: int = 4,\n        num_levels: int = 3,\n        num_prototypes: int = 8,\n        act_cfg: ConfigType = dict(type='ReLU', inplace=True),\n        norm_cfg: ConfigType = dict(type='BN')\n    ) -> None:\n        super().__init__(init_cfg=None)\n        self.num_levels = num_levels\n        self.fusion_conv = nn.Conv2d(num_levels * in_channels, in_channels, 1)\n        convs = []\n        for i in range(stacked_convs):\n            in_c = in_channels if i == 0 else feat_channels\n            convs.append(\n                ConvModule(\n                    in_c,\n                    feat_channels,\n                    3,\n                    padding=1,\n                    act_cfg=act_cfg,\n                    norm_cfg=norm_cfg))\n        self.stacked_convs = nn.Sequential(*convs)\n        self.projection = nn.Conv2d(\n            feat_channels, num_prototypes, kernel_size=1)\n\n    def forward(self, features: Tuple[Tensor, ...]) -> Tensor:\n        # multi-level feature fusion\n        fusion_feats = [features[0]]\n        size = features[0].shape[-2:]\n        for i in range(1, self.num_levels):\n            f = F.interpolate(features[i], size=size, mode='bilinear')\n            fusion_feats.append(f)\n        fusion_feats = torch.cat(fusion_feats, dim=1)\n        fusion_feats = self.fusion_conv(fusion_feats)\n        # pred mask feats\n        mask_features = self.stacked_convs(fusion_feats)\n        mask_features = self.projection(mask_features)\n        return mask_features\n\n\n@MODELS.register_module()\nclass RTMDetInsSepBNHead(RTMDetInsHead):\n    \"\"\"Detection Head of RTMDet-Ins with sep-bn layers.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        share_conv (bool): Whether to share conv layers between stages.\n            Defaults to True.\n        norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization\n            layer. Defaults to dict(type='BN').\n        act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer.\n            Defaults to dict(type='SiLU', inplace=True).\n        pred_kernel_size (int): Kernel size of prediction layer. 
Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 share_conv: bool = True,\n                 with_objectness: bool = False,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),\n                 pred_kernel_size: int = 1,\n                 **kwargs) -> None:\n        self.share_conv = share_conv\n        super().__init__(\n            num_classes,\n            in_channels,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg,\n            pred_kernel_size=pred_kernel_size,\n            with_objectness=with_objectness,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        self.kernel_convs = nn.ModuleList()\n\n        self.rtm_cls = nn.ModuleList()\n        self.rtm_reg = nn.ModuleList()\n        self.rtm_kernel = nn.ModuleList()\n        self.rtm_obj = nn.ModuleList()\n\n        # calculate num dynamic parameters\n        weight_nums, bias_nums = [], []\n        for i in range(self.num_dyconvs):\n            if i == 0:\n                weight_nums.append(\n                    (self.num_prototypes + 2) * self.dyconv_channels)\n                bias_nums.append(self.dyconv_channels)\n            elif i == self.num_dyconvs - 1:\n                weight_nums.append(self.dyconv_channels)\n                bias_nums.append(1)\n            else:\n                weight_nums.append(self.dyconv_channels * self.dyconv_channels)\n                bias_nums.append(self.dyconv_channels)\n        self.weight_nums = weight_nums\n        self.bias_nums = bias_nums\n        self.num_gen_params = sum(weight_nums) + sum(bias_nums)\n        pred_pad_size = self.pred_kernel_size // 2\n\n        for n in range(len(self.prior_generator.strides)):\n            cls_convs = nn.ModuleList()\n            reg_convs = nn.ModuleList()\n            kernel_convs = nn.ModuleList()\n            for i in range(self.stacked_convs):\n                chn = self.in_channels if i == 0 else self.feat_channels\n                cls_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                kernel_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n            self.cls_convs.append(cls_convs)\n            self.reg_convs.append(cls_convs)\n            
self.kernel_convs.append(kernel_convs)\n\n            self.rtm_cls.append(\n                nn.Conv2d(\n                    self.feat_channels,\n                    self.num_base_priors * self.cls_out_channels,\n                    self.pred_kernel_size,\n                    padding=pred_pad_size))\n            self.rtm_reg.append(\n                nn.Conv2d(\n                    self.feat_channels,\n                    self.num_base_priors * 4,\n                    self.pred_kernel_size,\n                    padding=pred_pad_size))\n            self.rtm_kernel.append(\n                nn.Conv2d(\n                    self.feat_channels,\n                    self.num_gen_params,\n                    self.pred_kernel_size,\n                    padding=pred_pad_size))\n            if self.with_objectness:\n                self.rtm_obj.append(\n                    nn.Conv2d(\n                        self.feat_channels,\n                        1,\n                        self.pred_kernel_size,\n                        padding=pred_pad_size))\n\n        if self.share_conv:\n            for n in range(len(self.prior_generator.strides)):\n                for i in range(self.stacked_convs):\n                    self.cls_convs[n][i].conv = self.cls_convs[0][i].conv\n                    self.reg_convs[n][i].conv = self.reg_convs[0][i].conv\n\n        self.mask_head = MaskFeatModule(\n            in_channels=self.in_channels,\n            feat_channels=self.feat_channels,\n            stacked_convs=4,\n            num_levels=len(self.prior_generator.strides),\n            num_prototypes=self.num_prototypes,\n            act_cfg=self.act_cfg,\n            norm_cfg=self.norm_cfg)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n        bias_cls = bias_init_with_prob(0.01)\n        for rtm_cls, rtm_reg, rtm_kernel in zip(self.rtm_cls, self.rtm_reg,\n                                                self.rtm_kernel):\n            normal_init(rtm_cls, std=0.01, bias=bias_cls)\n            normal_init(rtm_reg, std=0.01, bias=1)\n        if self.with_objectness:\n            for rtm_obj in self.rtm_obj:\n                normal_init(rtm_obj, std=0.01, bias=bias_cls)\n\n    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n            - cls_scores (list[Tensor]): Classification scores for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_base_priors * 4.\n            - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale\n              levels, each is a 4D-tensor, the channels number is\n              num_gen_params.\n            - mask_feat (Tensor): Output feature of the mask head. 
Each is a\n              4D-tensor, the channels number is num_prototypes.\n        \"\"\"\n        mask_feat = self.mask_head(feats)\n\n        cls_scores = []\n        bbox_preds = []\n        kernel_preds = []\n        for idx, (x, stride) in enumerate(\n                zip(feats, self.prior_generator.strides)):\n            cls_feat = x\n            reg_feat = x\n            kernel_feat = x\n\n            for cls_layer in self.cls_convs[idx]:\n                cls_feat = cls_layer(cls_feat)\n            cls_score = self.rtm_cls[idx](cls_feat)\n\n            for kernel_layer in self.kernel_convs[idx]:\n                kernel_feat = kernel_layer(kernel_feat)\n            kernel_pred = self.rtm_kernel[idx](kernel_feat)\n\n            for reg_layer in self.reg_convs[idx]:\n                reg_feat = reg_layer(reg_feat)\n\n            if self.with_objectness:\n                objectness = self.rtm_obj[idx](reg_feat)\n                cls_score = inverse_sigmoid(\n                    sigmoid_geometric_mean(cls_score, objectness))\n\n            reg_dist = F.relu(self.rtm_reg[idx](reg_feat)) * stride[0]\n\n            cls_scores.append(cls_score)\n            bbox_preds.append(reg_dist)\n            kernel_preds.append(kernel_pred)\n        return tuple(cls_scores), tuple(bbox_preds), tuple(\n            kernel_preds), mask_feat\n"
  },
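A minimal sketch, not part of the repository, to illustrate the dynamic-kernel bookkeeping that `RTMDetInsSepBNHead._init_layers` sets up above: `weight_nums`/`bias_nums` describe how one flattened per-prior kernel prediction of length `num_gen_params` is sliced into the weights and biases of the stacked dynamic mask convolutions. The concrete values `num_prototypes=8`, `num_dyconvs=3`, `dyconv_channels=8` are assumed here (they are defaults of the parent `RTMDetInsHead`, not restated in this file excerpt), and `torch.split` is used only as an illustrative way to parse the flat vector.

```python
# Sketch only: slice one flattened kernel prediction into per-layer
# dynamic-conv weights and biases, mirroring the weight_nums / bias_nums
# computation in RTMDetInsSepBNHead._init_layers.
# Assumed defaults (from the parent RTMDetInsHead, may differ per config):
import torch

num_prototypes, num_dyconvs, dyconv_channels = 8, 3, 8

weight_nums, bias_nums = [], []
for i in range(num_dyconvs):
    if i == 0:
        # first layer consumes the prototypes plus 2 coordinate channels
        weight_nums.append((num_prototypes + 2) * dyconv_channels)  # 80
        bias_nums.append(dyconv_channels)                           # 8
    elif i == num_dyconvs - 1:
        # last layer produces a single-channel mask logit
        weight_nums.append(dyconv_channels)                         # 8
        bias_nums.append(1)                                         # 1
    else:
        weight_nums.append(dyconv_channels * dyconv_channels)       # 64
        bias_nums.append(dyconv_channels)                           # 8

num_gen_params = sum(weight_nums) + sum(bias_nums)  # 169 with these defaults

# one flattened kernel prediction for a single positive prior
flat_kernel = torch.randn(num_gen_params)
splits = torch.split(flat_kernel, weight_nums + bias_nums)
weights, biases = splits[:num_dyconvs], splits[num_dyconvs:]
for lvl, (w, b) in enumerate(zip(weights, biases)):
    print(f'dyconv {lvl}: weight {tuple(w.shape)}, bias {tuple(b.shape)}')
```

With the assumed defaults this reproduces the 169-dimensional kernel prediction that `rtm_kernel` outputs per location (`num_gen_params` channels), which the head later convolves dynamically with the `num_prototypes`-channel mask feature.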
  {
    "path": "mmdet/models/dense_heads/sabl_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptInstanceList)\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,\n                     unmap)\nfrom .base_dense_head import BaseDenseHead\nfrom .guided_anchor_head import GuidedAnchorHead\n\n\n@MODELS.register_module()\nclass SABLRetinaHead(BaseDenseHead):\n    \"\"\"Side-Aware Boundary Localization (SABL) for RetinaNet.\n\n    The anchor generation, assigning and sampling in SABLRetinaHead\n    are the same as GuidedAnchorHead for guided anchoring.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        num_classes (int): Number of classes.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of Convs for classification and\n            regression branches. Defaults to 4.\n        feat_channels (int): Number of hidden channels. Defaults to 256.\n        approx_anchor_generator (:obj:`ConfigType` or dict): Config dict for\n            approx generator.\n        square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for\n            square generator.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            ConvModule. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            Norm Layer. Defaults to None.\n        bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. 
It should be ``True`` when\n            using ``IoULoss``, ``GIoULoss``, or ``DIoULoss`` in the bbox head.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            SABLRetinaHead.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            SABLRetinaHead.\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox_cls (:obj:`ConfigDict` or dict): Config of classification\n            loss for bbox branch.\n        loss_bbox_reg (:obj:`ConfigDict` or dict): Config of regression loss\n            for bbox branch.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        stacked_convs: int = 4,\n        feat_channels: int = 256,\n        approx_anchor_generator: ConfigType = dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        square_anchor_generator: ConfigType = dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[4],\n            strides=[8, 16, 32, 64, 128]),\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        bbox_coder: ConfigType = dict(\n            type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),\n        reg_decoded_bbox: bool = False,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        loss_cls: ConfigType = dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox_cls: ConfigType = dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),\n        loss_bbox_reg: ConfigType = dict(\n            type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),\n        init_cfg: MultiConfig = dict(\n            type='Normal',\n            layer='Conv2d',\n            std=0.01,\n            override=dict(\n                type='Normal', name='retina_cls', std=0.01, bias_prob=0.01))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.num_buckets = bbox_coder['num_buckets']\n        self.side_num = int(np.ceil(self.num_buckets / 2))\n\n        assert (approx_anchor_generator['octave_base_scale'] ==\n                square_anchor_generator['scales'][0])\n        assert (approx_anchor_generator['strides'] ==\n                square_anchor_generator['strides'])\n\n        self.approx_anchor_generator = TASK_UTILS.build(\n            approx_anchor_generator)\n        self.square_anchor_generator = TASK_UTILS.build(\n            square_anchor_generator)\n        self.approxs_per_octave = (\n            self.approx_anchor_generator.num_base_priors[0])\n\n        # one anchor per location\n        self.num_base_priors = self.square_anchor_generator.num_base_priors[0]\n\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = 
num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox_cls = MODELS.build(loss_bbox_cls)\n        self.loss_bbox_reg = MODELS.build(loss_bbox_reg)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            # use PseudoSampler when sampling is False\n            if 'sampler' in self.train_cfg:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.retina_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.retina_bbox_reg = nn.Conv2d(\n            self.feat_channels, self.side_num * 4, 3, padding=1)\n        self.retina_bbox_cls = nn.Conv2d(\n            self.feat_channels, self.side_num * 4, 3, padding=1)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.retina_cls(cls_feat)\n        bbox_cls_pred = self.retina_bbox_cls(reg_feat)\n        bbox_reg_pred = self.retina_bbox_reg(reg_feat)\n        bbox_pred = (bbox_cls_pred, bbox_reg_pred)\n        return cls_score, bbox_pred\n\n    def forward(self, feats: List[Tensor]) -> Tuple[List[Tensor]]:\n        return multi_apply(self.forward_single, feats)\n\n    def get_anchors(\n        self,\n        featmap_sizes: List[tuple],\n        img_metas: List[dict],\n        device: Union[torch.device, str] = 'cuda'\n    ) -> Tuple[List[List[Tensor]], List[List[Tensor]]]:\n        \"\"\"Get squares according to feature map sizes and guided anchors.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            device (torch.device | str): device for returned tensors\n\n        Returns:\n            tuple: square approxs of each image\n        \"\"\"\n        num_imgs = len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # squares for one time\n        multi_level_squares = self.square_anchor_generator.grid_priors(\n            featmap_sizes, 
device=device)\n        squares_list = [multi_level_squares for _ in range(num_imgs)]\n\n        return squares_list\n\n    def get_targets(self,\n                    approx_list: List[List[Tensor]],\n                    inside_flag_list: List[List[Tensor]],\n                    square_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas,\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs=True) -> tuple:\n        \"\"\"Compute bucketing targets.\n\n        Args:\n            approx_list (list[list[Tensor]]): Multi level approxs of each\n                image.\n            inside_flag_list (list[list[Tensor]]): Multi level inside flags of\n                each image.\n            square_list (list[list[Tensor]]): Multi level squares of each\n                image.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. Defaults to True.\n\n        Returns:\n            tuple: Returns a tuple containing learning targets.\n\n            - labels_list (list[Tensor]): Labels of each level.\n            - label_weights_list (list[Tensor]): Label weights of each level.\n            - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \\\n            each level.\n            - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \\\n            each level.\n            - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \\\n            each level.\n            - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \\\n            each level.\n            - num_total_pos (int): Number of positive samples in all images.\n            - num_total_neg (int): Number of negative samples in all images.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(approx_list) == len(inside_flag_list) == len(\n            square_list) == num_imgs\n        # anchor number of multi levels\n        num_level_squares = [squares.size(0) for squares in square_list[0]]\n        # concat all level anchors and flags to a single tensor\n        inside_flag_flat_list = []\n        approx_flat_list = []\n        square_flat_list = []\n        for i in range(num_imgs):\n            assert len(square_list[i]) == len(inside_flag_list[i])\n            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))\n            approx_flat_list.append(torch.cat(approx_list[i]))\n            square_flat_list.append(torch.cat(square_list[i]))\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None for _ in range(num_imgs)]\n        (all_labels, all_label_weights, all_bbox_cls_targets,\n         all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,\n         pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply(\n             self._get_targets_single,\n    
         approx_flat_list,\n             inside_flag_flat_list,\n             square_flat_list,\n             batch_gt_instances,\n             batch_img_metas,\n             batch_gt_instances_ignore,\n             unmap_outputs=unmap_outputs)\n\n        # sampled anchors of all images\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        # split targets to a list w.r.t. multiple levels\n        labels_list = images_to_levels(all_labels, num_level_squares)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_squares)\n        bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,\n                                                 num_level_squares)\n        bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,\n                                                 num_level_squares)\n        bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,\n                                                 num_level_squares)\n        bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,\n                                                 num_level_squares)\n        return (labels_list, label_weights_list, bbox_cls_targets_list,\n                bbox_cls_weights_list, bbox_reg_targets_list,\n                bbox_reg_weights_list, avg_factor)\n\n    def _get_targets_single(self,\n                            flat_approxs: Tensor,\n                            inside_flags: Tensor,\n                            flat_squares: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            flat_approxs (Tensor): flat approxs of a single image,\n                shape (n, 4)\n            inside_flags (Tensor): inside flags of a single image,\n                shape (n, ).\n            flat_squares (Tensor): flat squares of a single image,\n                shape (approxs_per_octave * n, 4)\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.  
Defaults to True.\n\n        Returns:\n            tuple:\n\n            - labels_list (Tensor): Labels in a single image.\n            - label_weights (Tensor): Label weights in a single image.\n            - bbox_cls_targets (Tensor): BBox cls targets in a single image.\n            - bbox_cls_weights (Tensor): BBox cls weights in a single image.\n            - bbox_reg_targets (Tensor): BBox reg targets in a single image.\n            - bbox_reg_weights (Tensor): BBox reg weights in a single image.\n            - num_total_pos (int): Number of positive samples in a single \\\n            image.\n            - num_total_neg (int): Number of negative samples in a single \\\n            image.\n            - sampling_result (:obj:`SamplingResult`): Sampling result object.\n        \"\"\"\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        num_square = flat_squares.size(0)\n        approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4)\n        approxs = approxs[inside_flags, ...]\n        squares = flat_squares[inside_flags, :]\n\n        pred_instances = InstanceData()\n        pred_instances.priors = squares\n        pred_instances.approxs = approxs\n        assign_result = self.assigner.assign(pred_instances, gt_instances,\n                                             gt_instances_ignore)\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_squares = squares.shape[0]\n        bbox_cls_targets = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_cls_weights = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_reg_targets = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_reg_weights = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        labels = squares.new_full((num_valid_squares, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,\n             pos_bbox_cls_weights) = self.bbox_coder.encode(\n                 sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n\n            bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets\n            bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets\n            bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights\n            bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_squares.size(0)\n            labels = unmap(\n                labels, 
num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,\n                                     inside_flags)\n            bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,\n                                     inside_flags)\n            bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,\n                                     inside_flags)\n            bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,\n                                     inside_flags)\n        return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,\n                bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds,\n                sampling_result)\n\n    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            labels: Tensor, label_weights: Tensor,\n                            bbox_cls_targets: Tensor, bbox_cls_weights: Tensor,\n                            bbox_reg_targets: Tensor, bbox_reg_weights: Tensor,\n                            avg_factor: float) -> Tuple[Tensor]:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            labels (Tensor): Labels in a single image.\n            label_weights (Tensor): Label weights in a single level.\n            bbox_cls_targets (Tensor): BBox cls targets in a single level.\n            bbox_cls_weights (Tensor): BBox cls weights in a single level.\n            bbox_reg_targets (Tensor): BBox reg targets in a single level.\n            bbox_reg_weights (Tensor): BBox reg weights in a single level.\n            avg_factor (int): Average factor that is used to average the loss.\n\n        Returns:\n            tuple: loss components.\n        \"\"\"\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=avg_factor)\n        # regression loss\n        bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)\n        bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)\n        bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)\n        bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)\n        (bbox_cls_pred, bbox_reg_pred) = bbox_pred\n        bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(\n            -1, self.side_num * 4)\n        bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(\n            -1, self.side_num * 4)\n        loss_bbox_cls = self.loss_bbox_cls(\n            bbox_cls_pred,\n            bbox_cls_targets.long(),\n            bbox_cls_weights,\n            avg_factor=avg_factor * 4 * self.side_num)\n        loss_bbox_reg = self.loss_bbox_reg(\n            bbox_reg_pred,\n            bbox_reg_targets,\n            bbox_reg_weights,\n            avg_factor=avg_factor * 4 * 
self.bbox_coder.offset_topk)\n        return loss_cls, loss_bbox_cls, loss_bbox_reg\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.approx_anchor_generator.num_levels\n\n        device = cls_scores[0].device\n\n        # get sampled approxes\n        approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(\n            self, featmap_sizes, batch_img_metas, device=device)\n\n        square_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.get_targets(\n            approxs_list,\n            inside_flag_list,\n            square_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (labels_list, label_weights_list, bbox_cls_targets_list,\n         bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,\n         avg_factor) = cls_reg_targets\n\n        losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(\n            self.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            labels_list,\n            label_weights_list,\n            bbox_cls_targets_list,\n            bbox_cls_weights_list,\n            bbox_reg_targets_list,\n            bbox_reg_weights_list,\n            avg_factor=avg_factor)\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox_cls=losses_bbox_cls,\n            loss_bbox_reg=losses_bbox_reg)\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        batch_img_metas: List[dict],\n                        cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Note: When score_factors is not None, the cls_scores are\n        usually multiplied by it then obtain the real score used in NMS,\n        such as CenterNess in FCOS, IoU branch in ATSS.\n\n        
Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n            cfg (:obj:`ConfigDict`, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n        num_levels = len(cls_scores)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n\n        device = cls_scores[0].device\n        mlvl_anchors = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score_list = [\n                cls_scores[i][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_cls_pred_list = [\n                bbox_preds[i][0][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_reg_pred_list = [\n                bbox_preds[i][1][img_id].detach() for i in range(num_levels)\n            ]\n            proposals = self._predict_by_feat_single(\n                cls_scores=cls_score_list,\n                bbox_cls_preds=bbox_cls_pred_list,\n                bbox_reg_preds=bbox_reg_pred_list,\n                mlvl_anchors=mlvl_anchors[img_id],\n                img_meta=batch_img_metas[img_id],\n                cfg=cfg,\n                rescale=rescale,\n                with_nms=with_nms)\n            result_list.append(proposals)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_scores: List[Tensor],\n                                bbox_cls_preds: List[Tensor],\n                                bbox_reg_preds: List[Tensor],\n                                mlvl_anchors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        cfg = self.test_cfg if cfg is None else cfg\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_confids = []\n        mlvl_labels = []\n        assert len(cls_scores) == len(bbox_cls_preds) == len(\n            bbox_reg_preds) == len(mlvl_anchors)\n        for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(\n                
cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):\n            assert cls_score.size()[-2:] == bbox_cls_pred.size(\n            )[-2:] == bbox_reg_pred.size()[-2::]\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)[:, :-1]\n            bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(\n                -1, self.side_num * 4)\n            bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(\n                -1, self.side_num * 4)\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(\n                    anchors=anchors,\n                    bbox_cls_pred=bbox_cls_pred,\n                    bbox_reg_pred=bbox_reg_pred))\n            scores, labels, _, filtered_results = results\n\n            anchors = filtered_results['anchors']\n            bbox_cls_pred = filtered_results['bbox_cls_pred']\n            bbox_reg_pred = filtered_results['bbox_reg_pred']\n\n            bbox_preds = [\n                bbox_cls_pred.contiguous(),\n                bbox_reg_pred.contiguous()\n            ]\n            bboxes, confids = self.bbox_coder.decode(\n                anchors.contiguous(),\n                bbox_preds,\n                max_shape=img_meta['img_shape'])\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_confids.append(confids)\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.score_factors = torch.cat(mlvl_confids)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n"
  },
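A minimal sketch, not part of the repository, of the output-channel arithmetic implied by `SABLRetinaHead._init_layers` and `forward_single` above: with the default `BucketingBBoxCoder(num_buckets=14)`, `side_num = ceil(14 / 2) = 7`, so the bucket-classification and bucket-regression branches each emit `side_num * 4 = 28` channels per location (one square anchor per location), while the classification branch emits `cls_out_channels`. The values `num_classes=80` and `feat_channels=256` below are illustrative assumptions.

```python
# Sketch only: shape check of the three prediction branches created in
# SABLRetinaHead._init_layers, for one FPN level.
import numpy as np
import torch
import torch.nn as nn

num_classes = 80                            # assumed, e.g. COCO
num_buckets = 14                            # default of BucketingBBoxCoder
side_num = int(np.ceil(num_buckets / 2))    # 7 buckets per side
cls_out_channels = num_classes              # sigmoid (focal) classification
feat_channels = 256

feat = torch.randn(2, feat_channels, 32, 32)
retina_cls = nn.Conv2d(feat_channels, cls_out_channels, 3, padding=1)
retina_bbox_cls = nn.Conv2d(feat_channels, side_num * 4, 3, padding=1)
retina_bbox_reg = nn.Conv2d(feat_channels, side_num * 4, 3, padding=1)

cls_score = retina_cls(feat)            # (2, 80, 32, 32)
bbox_cls_pred = retina_bbox_cls(feat)   # (2, 28, 32, 32): bucket label per side
bbox_reg_pred = retina_bbox_reg(feat)   # (2, 28, 32, 32): fine offset per bucket
print(cls_score.shape, bbox_cls_pred.shape, bbox_reg_pred.shape)
```

This mirrors why `forward_single` returns `bbox_pred` as the pair `(bbox_cls_pred, bbox_reg_pred)` rather than a single 4-channel delta map.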
  {
    "path": "mmdet/models/dense_heads/solo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.utils.misc import floordiv\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType\nfrom ..layers import mask_matrix_nms\nfrom ..utils import center_of_mass, generate_coordinate, multi_apply\nfrom .base_mask_head import BaseMaskHead\n\n\n@MODELS.register_module()\nclass SOLOHead(BaseMaskHead):\n    \"\"\"SOLO mask head used in `SOLO: Segmenting Objects by Locations.\n\n    <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n            Defaults to 256.\n        stacked_convs (int): Number of stacking convs of the head.\n            Defaults to 4.\n        strides (tuple): Downsample factor of each feature map.\n        scale_ranges (tuple[tuple[int, int]]): Area range of multiple\n            level masks, in the format [(min1, max1), (min2, max2), ...].\n            A range of (16, 64) means the area range between (16, 64).\n        pos_scale (float): Constant scale factor to control the center region.\n        num_grids (list[int]): Divided image into a uniform grids, each\n            feature map has a different grid value. The number of output\n            channels is grid ** 2. Defaults to [40, 36, 24, 16, 12].\n        cls_down_index (int): The index of downsample operation in\n            classification branch. 
Defaults to 0.\n        loss_mask (dict): Config of mask loss.\n        loss_cls (dict): Config of classification loss.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Defaults to norm_cfg=dict(type='GN', num_groups=32,\n            requires_grad=True).\n        train_cfg (dict): Training config of head.\n        test_cfg (dict): Testing config of head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        feat_channels: int = 256,\n        stacked_convs: int = 4,\n        strides: tuple = (4, 8, 16, 32, 64),\n        scale_ranges: tuple = ((8, 32), (16, 64), (32, 128), (64, 256), (128,\n                                                                         512)),\n        pos_scale: float = 0.2,\n        num_grids: list = [40, 36, 24, 16, 12],\n        cls_down_index: int = 0,\n        loss_mask: ConfigType = dict(\n            type='DiceLoss', use_sigmoid=True, loss_weight=3.0),\n        loss_cls: ConfigType = dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        norm_cfg: ConfigType = dict(\n            type='GN', num_groups=32, requires_grad=True),\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        init_cfg: MultiConfig = [\n            dict(type='Normal', layer='Conv2d', std=0.01),\n            dict(\n                type='Normal',\n                std=0.01,\n                bias_prob=0.01,\n                override=dict(name='conv_mask_list')),\n            dict(\n                type='Normal',\n                std=0.01,\n                bias_prob=0.01,\n                override=dict(name='conv_cls'))\n        ]\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.cls_out_channels = self.num_classes\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.num_grids = num_grids\n        # number of FPN feats\n        self.num_levels = len(strides)\n        assert self.num_levels == len(scale_ranges) == len(num_grids)\n        self.scale_ranges = scale_ranges\n        self.pos_scale = pos_scale\n\n        self.cls_down_index = cls_down_index\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_mask = MODELS.build(loss_mask)\n        self.norm_cfg = norm_cfg\n        self.init_cfg = init_cfg\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.mask_convs = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.mask_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    
stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        self.conv_mask_list = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list.append(\n                nn.Conv2d(self.feat_channels, num_grid**2, 1))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def resize_feats(self, x: Tuple[Tensor]) -> List[Tensor]:\n        \"\"\"Downsample the first feat and upsample last feat in feats.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            list[Tensor]: Features after resizing, each is a 4D-tensor.\n        \"\"\"\n        out = []\n        for i in range(len(x)):\n            if i == 0:\n                out.append(\n                    F.interpolate(x[0], scale_factor=0.5, mode='bilinear'))\n            elif i == len(x) - 1:\n                out.append(\n                    F.interpolate(\n                        x[i], size=x[i - 1].shape[-2:], mode='bilinear'))\n            else:\n                out.append(x[i])\n        return out\n\n    def forward(self, x: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and mask prediction.\n\n                - mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.\n                  Each element in the list has shape\n                  (batch_size, num_grids**2 ,h ,w).\n                - mlvl_cls_preds (list[Tensor]): Multi-level scores.\n                  Each element in the list has shape\n                  (batch_size, num_classes, num_grids ,num_grids).\n        \"\"\"\n        assert len(x) == self.num_levels\n        feats = self.resize_feats(x)\n        mlvl_mask_preds = []\n        mlvl_cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat = torch.cat([mask_feat, coord_feat], 1)\n\n            for mask_layer in (self.mask_convs):\n                mask_feat = mask_layer(mask_feat)\n\n            mask_feat = F.interpolate(\n                mask_feat, scale_factor=2, mode='bilinear')\n            mask_preds = self.conv_mask_list[i](mask_feat)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_preds = F.interpolate(\n                    mask_preds.sigmoid(), size=upsampled_size, mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = 
local_max[:, :, :-1, :-1] == cls_pred\n                cls_pred = cls_pred * keep_mask\n\n            mlvl_mask_preds.append(mask_preds)\n            mlvl_cls_preds.append(cls_pred)\n        return mlvl_mask_preds, mlvl_cls_preds\n\n    def loss_by_feat(self, mlvl_mask_preds: List[Tensor],\n                     mlvl_cls_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.\n                Each element in the list has shape\n                (batch_size, num_grids**2 ,h ,w).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_levels = self.num_levels\n        num_imgs = len(batch_img_metas)\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds]\n\n        # `BoolTensor` in `pos_masks` represent\n        # whether the corresponding point is\n        # positive\n        pos_mask_targets, labels, pos_masks = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            featmap_sizes=featmap_sizes)\n\n        # change from the outside list meaning multi images\n        # to the outside list meaning multi levels\n        mlvl_pos_mask_targets = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds = [[] for _ in range(num_levels)]\n        mlvl_pos_masks = [[] for _ in range(num_levels)]\n        mlvl_labels = [[] for _ in range(num_levels)]\n        for img_id in range(num_imgs):\n            assert num_levels == len(pos_mask_targets[img_id])\n            for lvl in range(num_levels):\n                mlvl_pos_mask_targets[lvl].append(\n                    pos_mask_targets[img_id][lvl])\n                mlvl_pos_mask_preds[lvl].append(\n                    mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...])\n                mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten())\n                mlvl_labels[lvl].append(labels[img_id][lvl].flatten())\n\n        # cat multiple image\n        temp_mlvl_cls_preds = []\n        for lvl in range(num_levels):\n            mlvl_pos_mask_targets[lvl] = torch.cat(\n                mlvl_pos_mask_targets[lvl], dim=0)\n            mlvl_pos_mask_preds[lvl] = torch.cat(\n                mlvl_pos_mask_preds[lvl], dim=0)\n            mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0)\n            mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0)\n            temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute(\n                0, 2, 3, 1).reshape(-1, self.cls_out_channels))\n\n        num_pos = sum(item.sum() for item in mlvl_pos_masks)\n        # dice loss\n        loss_mask = []\n        for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets):\n            if pred.size()[0] == 0:\n                loss_mask.append(pred.sum().unsqueeze(0))\n                continue\n            loss_mask.append(\n                self.loss_mask(pred, target, reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n 
           loss_mask = torch.cat(loss_mask).mean()\n\n        flatten_labels = torch.cat(mlvl_labels)\n        flatten_cls_preds = torch.cat(temp_mlvl_cls_preds)\n        loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    def _get_targets_single(self,\n                            gt_instances: InstanceData,\n                            featmap_sizes: Optional[list] = None) -> tuple:\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            featmap_sizes (list[:obj:`torch.size`]): Size of each\n                feature map from feature pyramid, each element\n                means (feat_h, feat_w). Defaults to None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_pos_masks (list[Tensor]): Each element is\n                  a `BoolTensor` to represent whether the\n                  corresponding point in single level\n                  is positive, has shape (num_grid **2).\n        \"\"\"\n        gt_labels = gt_instances.labels\n        device = gt_labels.device\n\n        gt_bboxes = gt_instances.bboxes\n        gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                              (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n\n        gt_masks = gt_instances.masks.to_tensor(\n            dtype=torch.bool, device=device)\n\n        mlvl_pos_mask_targets = []\n        mlvl_labels = []\n        mlvl_pos_masks = []\n        for (lower_bound, upper_bound), stride, featmap_size, num_grid \\\n                in zip(self.scale_ranges, self.strides,\n                       featmap_sizes, self.num_grids):\n\n            mask_target = torch.zeros(\n                [num_grid**2, featmap_size[0], featmap_size[1]],\n                dtype=torch.uint8,\n                device=device)\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            labels = torch.zeros([num_grid, num_grid],\n                                 dtype=torch.int64,\n                                 device=device) + self.num_classes\n            pos_mask = torch.zeros([num_grid**2],\n                                   dtype=torch.bool,\n                                   device=device)\n\n            gt_inds = ((gt_areas >= lower_bound) &\n                       (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(gt_inds) == 0:\n                mlvl_pos_mask_targets.append(\n                    mask_target.new_zeros(0, featmap_size[0], featmap_size[1]))\n                mlvl_labels.append(labels)\n                mlvl_pos_masks.append(pos_mask)\n                continue\n            hit_gt_bboxes = gt_bboxes[gt_inds]\n            hit_gt_labels = gt_labels[gt_inds]\n            hit_gt_masks = gt_masks[gt_inds, ...]\n\n            pos_w_ranges = 0.5 * 
(hit_gt_bboxes[:, 2] -\n                                  hit_gt_bboxes[:, 0]) * self.pos_scale\n            pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] -\n                                  hit_gt_bboxes[:, 1]) * self.pos_scale\n\n            # Make sure hit_gt_masks has a value\n            valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0\n            output_stride = stride / 2\n\n            for gt_mask, gt_label, pos_h_range, pos_w_range, \\\n                valid_mask_flag in \\\n                    zip(hit_gt_masks, hit_gt_labels, pos_h_ranges,\n                        pos_w_ranges, valid_mask_flags):\n                if not valid_mask_flag:\n                    continue\n                upsampled_size = (featmap_sizes[0][0] * 4,\n                                  featmap_sizes[0][1] * 4)\n                center_h, center_w = center_of_mass(gt_mask)\n\n                coord_w = int(\n                    floordiv((center_w / upsampled_size[1]), (1. / num_grid),\n                             rounding_mode='trunc'))\n                coord_h = int(\n                    floordiv((center_h / upsampled_size[0]), (1. / num_grid),\n                             rounding_mode='trunc'))\n\n                # left, top, right, down\n                top_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_h - pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                down_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_h + pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                left_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_w - pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                right_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_w + pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n\n                top = max(top_box, coord_h - 1)\n                down = min(down_box, coord_h + 1)\n                left = max(coord_w - 1, left_box)\n                right = min(right_box, coord_w + 1)\n\n                labels[top:(down + 1), left:(right + 1)] = gt_label\n                # ins\n                gt_mask = np.uint8(gt_mask.cpu().numpy())\n                # Follow the original implementation, F.interpolate is\n                # different from cv2 and opencv\n                gt_mask = mmcv.imrescale(gt_mask, scale=1. 
/ output_stride)\n                gt_mask = torch.from_numpy(gt_mask).to(device=device)\n\n                for i in range(top, down + 1):\n                    for j in range(left, right + 1):\n                        index = int(i * num_grid + j)\n                        mask_target[index, :gt_mask.shape[0], :gt_mask.\n                                    shape[1]] = gt_mask\n                        pos_mask[index] = True\n            mlvl_pos_mask_targets.append(mask_target[pos_mask])\n            mlvl_labels.append(labels)\n            mlvl_pos_masks.append(pos_mask)\n        return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks\n\n    def predict_by_feat(self, mlvl_mask_preds: List[Tensor],\n                        mlvl_cls_scores: List[Tensor],\n                        batch_img_metas: List[dict], **kwargs) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.\n                Each element in the list has shape\n                (batch_size, num_grids**2 ,h ,w).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids ,num_grids).\n            batch_img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        mlvl_cls_scores = [\n            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores\n        ]\n        assert len(mlvl_mask_preds) == len(mlvl_cls_scores)\n        num_levels = len(mlvl_cls_scores)\n\n        results_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_pred_list = [\n                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)\n                for lvl in range(num_levels)\n            ]\n            mask_pred_list = [\n                mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels)\n            ]\n\n            cls_pred_list = torch.cat(cls_pred_list, dim=0)\n            mask_pred_list = torch.cat(mask_pred_list, dim=0)\n            img_meta = batch_img_metas[img_id]\n\n            results = self._predict_by_feat_single(\n                cls_pred_list, mask_pred_list, img_meta=img_meta)\n            results_list.append(results)\n\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                cls_scores: Tensor,\n                                mask_preds: Tensor,\n                                img_meta: dict,\n                                cfg: OptConfigType = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        mask results.\n\n        Args:\n            cls_scores (Tensor): Classification score of all points\n                in single image, has shape (num_points, num_classes).\n            mask_preds (Tensor): Mask prediction of all points in\n                single image, has shape (num_points, feat_h, feat_w).\n            img_meta (dict): Meta information of 
corresponding image.\n            cfg (dict, optional): Config used in test phase.\n                Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(cls_scores, ori_shape):\n            \"\"\"Generate a empty results.\"\"\"\n            results = InstanceData()\n            results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *ori_shape)\n            results.labels = cls_scores.new_ones(0)\n            results.bboxes = cls_scores.new_zeros(0, 4)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_scores) == len(mask_preds)\n\n        featmap_size = mask_preds.size()[-2:]\n\n        h, w = img_meta['img_shape'][:2]\n        upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4)\n\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        if len(cls_scores) == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n\n        inds = score_mask.nonzero()\n        cls_labels = inds[:, 1]\n\n        # Filter the mask mask with an area is smaller than\n        # stride of corresponding feature level\n        lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0)\n        strides = cls_scores.new_ones(lvl_interval[-1])\n        strides[:lvl_interval[0]] *= self.strides[0]\n        for lvl in range(1, self.num_levels):\n            strides[lvl_interval[lvl -\n                                 1]:lvl_interval[lvl]] *= self.strides[lvl]\n        strides = strides[inds[:, 0]]\n        mask_preds = mask_preds[inds[:, 0]]\n\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n        masks = masks[keep]\n        mask_preds = mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        # mask_matrix_nms may return an empty Tensor\n        if len(keep_inds) == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0), size=upsampled_size,\n            mode='bilinear')[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds, size=img_meta['ori_shape'][:2],\n            mode='bilinear').squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results = InstanceData()\n        results.masks = masks\n        results.labels = labels\n        
results.scores = scores\n        # create an empty bbox in InstanceData to avoid bugs when\n        # calculating metrics.\n        results.bboxes = results.scores.new_zeros(len(scores), 4)\n        return results\n\n\n@MODELS.register_module()\nclass DecoupledSOLOHead(SOLOHead):\n    \"\"\"Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations.\n\n    <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 init_cfg: MultiConfig = [\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_x')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_y')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs) -> None:\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self) -> None:\n        self.mask_convs_x = nn.ModuleList()\n        self.mask_convs_y = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n\n        for i in range(self.stacked_convs):\n            chn = self.in_channels + 1 if i == 0 else self.feat_channels\n            self.mask_convs_x.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n            self.mask_convs_y.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_mask_list_x = nn.ModuleList()\n        self.conv_mask_list_y = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list_x.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n            self.conv_mask_list_y.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and mask prediction.\n\n                - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                  from x branch. 
Each element in the list has shape\n                  (batch_size, num_grids ,h ,w).\n                - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                  from y branch. Each element in the list has shape\n                  (batch_size, num_grids ,h ,w).\n                - mlvl_cls_preds (list[Tensor]): Multi-level scores.\n                  Each element in the list has shape\n                  (batch_size, num_classes, num_grids ,num_grids).\n        \"\"\"\n        assert len(x) == self.num_levels\n        feats = self.resize_feats(x)\n        mask_preds_x = []\n        mask_preds_y = []\n        cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1)\n            mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1)\n\n            for mask_layer_x, mask_layer_y in \\\n                    zip(self.mask_convs_x, self.mask_convs_y):\n                mask_feat_x = mask_layer_x(mask_feat_x)\n                mask_feat_y = mask_layer_y(mask_feat_y)\n\n            mask_feat_x = F.interpolate(\n                mask_feat_x, scale_factor=2, mode='bilinear')\n            mask_feat_y = F.interpolate(\n                mask_feat_y, scale_factor=2, mode='bilinear')\n\n            mask_pred_x = self.conv_mask_list_x[i](mask_feat_x)\n            mask_pred_y = self.conv_mask_list_y[i](mask_feat_y)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_pred_x = F.interpolate(\n                    mask_pred_x.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                mask_pred_y = F.interpolate(\n                    mask_pred_y.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = local_max[:, :, :-1, :-1] == cls_pred\n                cls_pred = cls_pred * keep_mask\n\n            mask_preds_x.append(mask_pred_x)\n            mask_preds_y.append(mask_pred_y)\n            cls_preds.append(cls_pred)\n        return mask_preds_x, mask_preds_y, cls_preds\n\n    def loss_by_feat(self, mlvl_mask_preds_x: List[Tensor],\n                     mlvl_mask_preds_y: List[Tensor],\n                     mlvl_cls_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                from x branch. 
Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                from y branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids ,num_grids).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_levels = self.num_levels\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x]\n\n        pos_mask_targets, labels, xy_pos_indexes = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            featmap_sizes=featmap_sizes)\n\n        # change from the outside list meaning multi images\n        # to the outside list meaning multi levels\n        mlvl_pos_mask_targets = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)]\n        mlvl_labels = [[] for _ in range(num_levels)]\n        for img_id in range(num_imgs):\n\n            for lvl in range(num_levels):\n                mlvl_pos_mask_targets[lvl].append(\n                    pos_mask_targets[img_id][lvl])\n                mlvl_pos_mask_preds_x[lvl].append(\n                    mlvl_mask_preds_x[lvl][img_id,\n                                           xy_pos_indexes[img_id][lvl][:, 1]])\n                mlvl_pos_mask_preds_y[lvl].append(\n                    mlvl_mask_preds_y[lvl][img_id,\n                                           xy_pos_indexes[img_id][lvl][:, 0]])\n                mlvl_labels[lvl].append(labels[img_id][lvl].flatten())\n\n        # cat multiple image\n        temp_mlvl_cls_preds = []\n        for lvl in range(num_levels):\n            mlvl_pos_mask_targets[lvl] = torch.cat(\n                mlvl_pos_mask_targets[lvl], dim=0)\n            mlvl_pos_mask_preds_x[lvl] = torch.cat(\n                mlvl_pos_mask_preds_x[lvl], dim=0)\n            mlvl_pos_mask_preds_y[lvl] = torch.cat(\n                mlvl_pos_mask_preds_y[lvl], dim=0)\n            mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0)\n            temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute(\n                0, 2, 3, 1).reshape(-1, self.cls_out_channels))\n\n        num_pos = 0.\n        # dice loss\n        loss_mask = []\n        for pred_x, pred_y, target in \\\n                zip(mlvl_pos_mask_preds_x,\n                    mlvl_pos_mask_preds_y, mlvl_pos_mask_targets):\n            num_masks = pred_x.size(0)\n            if num_masks == 0:\n                # make sure can get grad\n                loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0))\n                continue\n            num_pos += num_masks\n            pred_mask = pred_y.sigmoid() * pred_x.sigmoid()\n            loss_mask.append(\n                self.loss_mask(pred_mask, target, reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n            loss_mask = 
torch.cat(loss_mask).mean()\n\n        # cate\n        flatten_labels = torch.cat(mlvl_labels)\n        flatten_cls_preds = torch.cat(temp_mlvl_cls_preds)\n\n        loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    def _get_targets_single(self,\n                            gt_instances: InstanceData,\n                            featmap_sizes: Optional[list] = None) -> tuple:\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            featmap_sizes (list[:obj:`torch.size`]): Size of each\n                feature map from feature pyramid, each element\n                means (feat_h, feat_w). Defaults to None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_xy_pos_indexes (list[Tensor]): Each element\n                  in the list contains the index of positive samples in\n                  corresponding level, has shape (num_pos, 2), last\n                  dimension 2 present (index_x, index_y).\n        \"\"\"\n        mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks = \\\n            super()._get_targets_single(gt_instances,\n                                        featmap_sizes=featmap_sizes)\n\n        mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero()\n                               for item in mlvl_labels]\n\n        return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes\n\n    def predict_by_feat(self, mlvl_mask_preds_x: List[Tensor],\n                        mlvl_mask_preds_y: List[Tensor],\n                        mlvl_cls_scores: List[Tensor],\n                        batch_img_metas: List[dict], **kwargs) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                from x branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                from y branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. 
Each element\n                in the list has shape\n                (batch_size, num_classes ,num_grids ,num_grids).\n            batch_img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        mlvl_cls_scores = [\n            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores\n        ]\n        assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores)\n        num_levels = len(mlvl_cls_scores)\n\n        results_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_pred_list = [\n                mlvl_cls_scores[i][img_id].view(\n                    -1, self.cls_out_channels).detach()\n                for i in range(num_levels)\n            ]\n            mask_pred_list_x = [\n                mlvl_mask_preds_x[i][img_id] for i in range(num_levels)\n            ]\n            mask_pred_list_y = [\n                mlvl_mask_preds_y[i][img_id] for i in range(num_levels)\n            ]\n\n            cls_pred_list = torch.cat(cls_pred_list, dim=0)\n            mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0)\n            mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0)\n            img_meta = batch_img_metas[img_id]\n\n            results = self._predict_by_feat_single(\n                cls_pred_list,\n                mask_pred_list_x,\n                mask_pred_list_y,\n                img_meta=img_meta)\n            results_list.append(results)\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                cls_scores: Tensor,\n                                mask_preds_x: Tensor,\n                                mask_preds_y: Tensor,\n                                img_meta: dict,\n                                cfg: OptConfigType = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        mask results.\n\n        Args:\n            cls_scores (Tensor): Classification score of all points\n                in single image, has shape (num_points, num_classes).\n            mask_preds_x (Tensor): Mask prediction of x branch of\n                all points in single image, has shape\n                (sum_num_grids, feat_h, feat_w).\n            mask_preds_y (Tensor): Mask prediction of y branch of\n                all points in single image, has shape\n                (sum_num_grids, feat_h, feat_w).\n            img_meta (dict): Meta information of corresponding image.\n            cfg (dict): Config used in test phase.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(cls_scores, ori_shape):\n            \"\"\"Generate a empty results.\"\"\"\n            results = InstanceData()\n   
         results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *ori_shape)\n            results.labels = cls_scores.new_ones(0)\n            results.bboxes = cls_scores.new_zeros(0, 4)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n\n        featmap_size = mask_preds_x.size()[-2:]\n\n        h, w = img_meta['img_shape'][:2]\n        upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4)\n\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        inds = score_mask.nonzero()\n        lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0)\n        num_all_points = lvl_interval[-1]\n        lvl_start_index = inds.new_ones(num_all_points)\n        num_grids = inds.new_ones(num_all_points)\n        seg_size = inds.new_tensor(self.num_grids).cumsum(0)\n        mask_lvl_start_index = inds.new_ones(num_all_points)\n        strides = inds.new_ones(num_all_points)\n\n        lvl_start_index[:lvl_interval[0]] *= 0\n        mask_lvl_start_index[:lvl_interval[0]] *= 0\n        num_grids[:lvl_interval[0]] *= self.num_grids[0]\n        strides[:lvl_interval[0]] *= self.strides[0]\n\n        for lvl in range(1, self.num_levels):\n            lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                lvl_interval[lvl - 1]\n            mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                seg_size[lvl - 1]\n            num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                self.num_grids[lvl]\n            strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                self.strides[lvl]\n\n        lvl_start_index = lvl_start_index[inds[:, 0]]\n        mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]]\n        num_grids = num_grids[inds[:, 0]]\n        strides = strides[inds[:, 0]]\n\n        y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids\n        x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids\n        y_inds = mask_lvl_start_index + y_lvl_offset\n        x_inds = mask_lvl_start_index + x_lvl_offset\n\n        cls_labels = inds[:, 1]\n        mask_preds = mask_preds_x[x_inds, ...] 
* mask_preds_y[y_inds, ...]\n\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n\n        masks = masks[keep]\n        mask_preds = mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        # mask_matrix_nms may return an empty Tensor\n        if len(keep_inds) == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0), size=upsampled_size,\n            mode='bilinear')[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds, size=img_meta['ori_shape'][:2],\n            mode='bilinear').squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results = InstanceData()\n        results.masks = masks\n        results.labels = labels\n        results.scores = scores\n        # create an empty bbox in InstanceData to avoid bugs when\n        # calculating metrics.\n        results.bboxes = results.scores.new_zeros(len(scores), 4)\n\n        return results\n\n\n@MODELS.register_module()\nclass DecoupledSOLOLightHead(DecoupledSOLOHead):\n    \"\"\"Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by\n    Locations <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        with_dcn (bool): Whether use dcn in mask_convs and cls_convs,\n            Defaults to False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 dcn_cfg: OptConfigType = None,\n                 init_cfg: MultiConfig = [\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_x')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_y')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs) -> None:\n        assert dcn_cfg is None or isinstance(dcn_cfg, dict)\n        self.dcn_cfg = dcn_cfg\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self) -> None:\n        self.mask_convs = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n\n        for i in range(self.stacked_convs):\n            if self.dcn_cfg is not None \\\n                    and i == self.stacked_convs - 1:\n                conv_cfg = self.dcn_cfg\n            
else:\n                conv_cfg = None\n\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.mask_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_mask_list_x = nn.ModuleList()\n        self.conv_mask_list_y = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list_x.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n            self.conv_mask_list_y.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and mask prediction.\n\n                - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                  from x branch. Each element in the list has shape\n                  (batch_size, num_grids ,h ,w).\n                - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                  from y branch. 
Each element in the list has shape\n                  (batch_size, num_grids ,h ,w).\n                - mlvl_cls_preds (list[Tensor]): Multi-level scores.\n                  Each element in the list has shape\n                  (batch_size, num_classes, num_grids ,num_grids).\n        \"\"\"\n        assert len(x) == self.num_levels\n        feats = self.resize_feats(x)\n        mask_preds_x = []\n        mask_preds_y = []\n        cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat = torch.cat([mask_feat, coord_feat], 1)\n\n            for mask_layer in self.mask_convs:\n                mask_feat = mask_layer(mask_feat)\n\n            mask_feat = F.interpolate(\n                mask_feat, scale_factor=2, mode='bilinear')\n\n            mask_pred_x = self.conv_mask_list_x[i](mask_feat)\n            mask_pred_y = self.conv_mask_list_y[i](mask_feat)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_pred_x = F.interpolate(\n                    mask_pred_x.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                mask_pred_y = F.interpolate(\n                    mask_pred_y.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = local_max[:, :, :-1, :-1] == cls_pred\n                cls_pred = cls_pred * keep_mask\n\n            mask_preds_x.append(mask_pred_x)\n            mask_preds_y.append(mask_pred_y)\n            cls_preds.append(cls_pred)\n        return mask_preds_x, mask_preds_y, cls_preds\n"
  },
  {
    "path": "mmdet/models/dense_heads/solov2_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Tuple\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.utils.misc import floordiv\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType\nfrom ..layers import mask_matrix_nms\nfrom ..utils import center_of_mass, generate_coordinate, multi_apply\nfrom .solo_head import SOLOHead\n\n\nclass MaskFeatModule(BaseModule):\n    \"\"\"SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast\n    Instance Segmentation. <https://arxiv.org/pdf/2003.10152>`_\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels of the mask feature\n             map branch.\n        start_level (int): The starting feature map level from RPN that\n             will be used to predict the mask feature map.\n        end_level (int): The ending feature map level from rpn that\n             will be used to predict the mask feature map.\n        out_channels (int): Number of output channels of the mask feature\n             map branch. This is the channel count of the mask\n             feature map that to be dynamically convolved with the predicted\n             kernel.\n        mask_stride (int): Downsample factor of the mask feature map output.\n            Defaults to 4.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        feat_channels: int,\n        start_level: int,\n        end_level: int,\n        out_channels: int,\n        mask_stride: int = 4,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        init_cfg: MultiConfig = [\n            dict(type='Normal', layer='Conv2d', std=0.01)\n        ]\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.start_level = start_level\n        self.end_level = end_level\n        self.mask_stride = mask_stride\n        assert start_level >= 0 and end_level >= start_level\n        self.out_channels = out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self._init_layers()\n        self.fp16_enabled = False\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.convs_all_levels = nn.ModuleList()\n        for i in range(self.start_level, self.end_level + 1):\n            convs_per_level = nn.Sequential()\n            if i == 0:\n                convs_per_level.add_module(\n                    f'conv{i}',\n                    ConvModule(\n                        self.in_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        inplace=False))\n                self.convs_all_levels.append(convs_per_level)\n                continue\n\n            for j in range(i):\n                if j == 0:\n                    if i == self.end_level:\n                        chn = self.in_channels + 2\n                    else:\n                        chn = self.in_channels\n                    convs_per_level.add_module(\n                        f'conv{j}',\n                        ConvModule(\n                            chn,\n                            self.feat_channels,\n                            3,\n                            padding=1,\n                            conv_cfg=self.conv_cfg,\n                            norm_cfg=self.norm_cfg,\n                            inplace=False))\n                    convs_per_level.add_module(\n                        f'upsample{j}',\n                        nn.Upsample(\n                            scale_factor=2,\n                            mode='bilinear',\n                            align_corners=False))\n                    continue\n\n                convs_per_level.add_module(\n                    f'conv{j}',\n                    ConvModule(\n                        self.feat_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        inplace=False))\n                convs_per_level.add_module(\n                    f'upsample{j}',\n                    nn.Upsample(\n                        scale_factor=2, mode='bilinear', align_corners=False))\n\n            self.convs_all_levels.append(convs_per_level)\n\n        self.conv_pred = ConvModule(\n            self.feat_channels,\n            self.out_channels,\n            1,\n            padding=0,\n            
conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg)\n\n    def forward(self, x: Tuple[Tensor]) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            Tensor: The predicted mask feature map.\n        \"\"\"\n        inputs = x[self.start_level:self.end_level + 1]\n        assert len(inputs) == (self.end_level - self.start_level + 1)\n        feature_add_all_level = self.convs_all_levels[0](inputs[0])\n        for i in range(1, len(inputs)):\n            input_p = inputs[i]\n            if i == len(inputs) - 1:\n                coord_feat = generate_coordinate(input_p.size(),\n                                                 input_p.device)\n                input_p = torch.cat([input_p, coord_feat], 1)\n\n            feature_add_all_level = feature_add_all_level + \\\n                self.convs_all_levels[i](input_p)\n\n        feature_pred = self.conv_pred(feature_add_all_level)\n        return feature_pred\n\n\n@MODELS.register_module()\nclass SOLOV2Head(SOLOHead):\n    \"\"\"SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance\n    Segmentation. <https://arxiv.org/pdf/2003.10152>`_\n\n    Args:\n        mask_feature_head (dict): Config of SOLOv2MaskFeatHead.\n        dynamic_conv_size (int): Dynamic Conv kernel size. Defaults to 1.\n        dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv.\n            Defaults to None.\n        dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of\n            kernel_convs and cls_convs, or only the last layer. It shall be set\n            `True` for the normal version of SOLOv2 and `False` for the\n            light-weight version. 
Defaults to True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 mask_feature_head: ConfigType,\n                 dynamic_conv_size: int = 1,\n                 dcn_cfg: OptConfigType = None,\n                 dcn_apply_to_all_conv: bool = True,\n                 init_cfg: MultiConfig = [\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs) -> None:\n        assert dcn_cfg is None or isinstance(dcn_cfg, dict)\n        self.dcn_cfg = dcn_cfg\n        self.with_dcn = dcn_cfg is not None\n        self.dcn_apply_to_all_conv = dcn_apply_to_all_conv\n        self.dynamic_conv_size = dynamic_conv_size\n        mask_out_channels = mask_feature_head.get('out_channels')\n        self.kernel_out_channels = \\\n            mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size\n\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n\n        # update the in_channels of mask_feature_head\n        if mask_feature_head.get('in_channels', None) is not None:\n            if mask_feature_head.in_channels != self.in_channels:\n                warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and '\n                              'SOLOv2Head should be same, changing '\n                              'mask_feature_head.in_channels to '\n                              f'{self.in_channels}')\n                mask_feature_head.update(in_channels=self.in_channels)\n        else:\n            mask_feature_head.update(in_channels=self.in_channels)\n\n        self.mask_feature_head = MaskFeatModule(**mask_feature_head)\n        self.mask_stride = self.mask_feature_head.mask_stride\n        self.fp16_enabled = False\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        self.kernel_convs = nn.ModuleList()\n        conv_cfg = None\n        for i in range(self.stacked_convs):\n            if self.with_dcn:\n                if self.dcn_apply_to_all_conv:\n                    conv_cfg = self.dcn_cfg\n                elif i == self.stacked_convs - 1:\n                    # light head\n                    conv_cfg = self.dcn_cfg\n\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.kernel_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n        self.conv_kernel = nn.Conv2d(\n            
self.feat_channels, self.kernel_out_channels, 3, padding=1)\n\n    def forward(self, x):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores, mask prediction,\n            and mask features.\n\n                - mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel\n                  prediction. The kernel is used to generate instance\n                  segmentation masks by dynamic convolution. Each element in\n                  the list has shape\n                  (batch_size, kernel_out_channels, num_grids, num_grids).\n                - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each\n                  element in the list has shape\n                  (batch_size, num_classes, num_grids, num_grids).\n                - mask_feats (Tensor): Unified mask feature map used to\n                  generate instance segmentation masks by dynamic convolution.\n                  Has shape (batch_size, mask_out_channels, h, w).\n        \"\"\"\n        assert len(x) == self.num_levels\n        mask_feats = self.mask_feature_head(x)\n        ins_kernel_feats = self.resize_feats(x)\n        mlvl_kernel_preds = []\n        mlvl_cls_preds = []\n        for i in range(self.num_levels):\n            ins_kernel_feat = ins_kernel_feats[i]\n            # ins branch\n            # concat coord\n            coord_feat = generate_coordinate(ins_kernel_feat.size(),\n                                             ins_kernel_feat.device)\n            ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1)\n\n            # kernel branch\n            kernel_feat = ins_kernel_feat\n            kernel_feat = F.interpolate(\n                kernel_feat,\n                size=self.num_grids[i],\n                mode='bilinear',\n                align_corners=False)\n\n            cate_feat = kernel_feat[:, :-2, :, :]\n\n            kernel_feat = kernel_feat.contiguous()\n            for i, kernel_conv in enumerate(self.kernel_convs):\n                kernel_feat = kernel_conv(kernel_feat)\n            kernel_pred = self.conv_kernel(kernel_feat)\n\n            # cate branch\n            cate_feat = cate_feat.contiguous()\n            for i, cls_conv in enumerate(self.cls_convs):\n                cate_feat = cls_conv(cate_feat)\n            cate_pred = self.conv_cls(cate_feat)\n\n            mlvl_kernel_preds.append(kernel_pred)\n            mlvl_cls_preds.append(cate_pred)\n\n        return mlvl_kernel_preds, mlvl_cls_preds, mask_feats\n\n    def _get_targets_single(self,\n                            gt_instances: InstanceData,\n                            featmap_sizes: Optional[list] = None) -> tuple:\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            featmap_sizes (list[:obj:`torch.size`]): Size of each\n                feature map from feature pyramid, each element\n                means (feat_h, feat_w). 
Defaults to None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_pos_masks  (list[Tensor]): Each element is\n                  a `BoolTensor` to represent whether the\n                  corresponding point in single level\n                  is positive, has shape (num_grid **2).\n                - mlvl_pos_indexes  (list[list]): Each element\n                  in the list contains the positive index in\n                  corresponding level, has shape (num_pos).\n        \"\"\"\n        gt_labels = gt_instances.labels\n        device = gt_labels.device\n\n        gt_bboxes = gt_instances.bboxes\n        gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                              (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n        gt_masks = gt_instances.masks.to_tensor(\n            dtype=torch.bool, device=device)\n\n        mlvl_pos_mask_targets = []\n        mlvl_pos_indexes = []\n        mlvl_labels = []\n        mlvl_pos_masks = []\n        for (lower_bound, upper_bound), num_grid \\\n                in zip(self.scale_ranges, self.num_grids):\n            mask_target = []\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            pos_index = []\n            labels = torch.zeros([num_grid, num_grid],\n                                 dtype=torch.int64,\n                                 device=device) + self.num_classes\n            pos_mask = torch.zeros([num_grid**2],\n                                   dtype=torch.bool,\n                                   device=device)\n\n            gt_inds = ((gt_areas >= lower_bound) &\n                       (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(gt_inds) == 0:\n                mlvl_pos_mask_targets.append(\n                    torch.zeros([0, featmap_sizes[0], featmap_sizes[1]],\n                                dtype=torch.uint8,\n                                device=device))\n                mlvl_labels.append(labels)\n                mlvl_pos_masks.append(pos_mask)\n                mlvl_pos_indexes.append([])\n                continue\n            hit_gt_bboxes = gt_bboxes[gt_inds]\n            hit_gt_labels = gt_labels[gt_inds]\n            hit_gt_masks = gt_masks[gt_inds, ...]\n\n            pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] -\n                                  hit_gt_bboxes[:, 0]) * self.pos_scale\n            pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] -\n                                  hit_gt_bboxes[:, 1]) * self.pos_scale\n\n            # Make sure hit_gt_masks has a value\n            valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0\n\n            for gt_mask, gt_label, pos_h_range, pos_w_range, \\\n                valid_mask_flag in \\\n                    zip(hit_gt_masks, hit_gt_labels, pos_h_ranges,\n                        pos_w_ranges, valid_mask_flags):\n                if not valid_mask_flag:\n                    continue\n                upsampled_size = (featmap_sizes[0] * self.mask_stride,\n                                  featmap_sizes[1] * 
self.mask_stride)\n                center_h, center_w = center_of_mass(gt_mask)\n\n                coord_w = int(\n                    floordiv((center_w / upsampled_size[1]), (1. / num_grid),\n                             rounding_mode='trunc'))\n                coord_h = int(\n                    floordiv((center_h / upsampled_size[0]), (1. / num_grid),\n                             rounding_mode='trunc'))\n\n                # left, top, right, down\n                top_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_h - pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                down_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_h + pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                left_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_w - pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                right_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_w + pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n\n                top = max(top_box, coord_h - 1)\n                down = min(down_box, coord_h + 1)\n                left = max(coord_w - 1, left_box)\n                right = min(right_box, coord_w + 1)\n\n                labels[top:(down + 1), left:(right + 1)] = gt_label\n                # ins\n                gt_mask = np.uint8(gt_mask.cpu().numpy())\n                # Follow the original implementation, F.interpolate is\n                # different from cv2 and opencv\n                gt_mask = mmcv.imrescale(gt_mask, scale=1. 
/ self.mask_stride)\n                gt_mask = torch.from_numpy(gt_mask).to(device=device)\n\n                for i in range(top, down + 1):\n                    for j in range(left, right + 1):\n                        index = int(i * num_grid + j)\n                        this_mask_target = torch.zeros(\n                            [featmap_sizes[0], featmap_sizes[1]],\n                            dtype=torch.uint8,\n                            device=device)\n                        this_mask_target[:gt_mask.shape[0], :gt_mask.\n                                         shape[1]] = gt_mask\n                        mask_target.append(this_mask_target)\n                        pos_mask[index] = True\n                        pos_index.append(index)\n            if len(mask_target) == 0:\n                mask_target = torch.zeros(\n                    [0, featmap_sizes[0], featmap_sizes[1]],\n                    dtype=torch.uint8,\n                    device=device)\n            else:\n                mask_target = torch.stack(mask_target, 0)\n            mlvl_pos_mask_targets.append(mask_target)\n            mlvl_labels.append(labels)\n            mlvl_pos_masks.append(pos_mask)\n            mlvl_pos_indexes.append(pos_index)\n        return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks,\n                mlvl_pos_indexes)\n\n    def loss_by_feat(self, mlvl_kernel_preds: List[Tensor],\n                     mlvl_cls_preds: List[Tensor], mask_feats: Tensor,\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel\n                prediction. The kernel is used to generate instance\n                segmentation masks by dynamic convolution. Each element in the\n                list has shape\n                (batch_size, kernel_out_channels, num_grids, num_grids).\n            mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids, num_grids).\n            mask_feats (Tensor): Unified mask feature map used to generate\n                instance segmentation masks by dynamic convolution. Has shape\n                (batch_size, mask_out_channels, h, w).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = mask_feats.size()[-2:]\n\n        pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply(\n            self._get_targets_single,\n            batch_gt_instances,\n            featmap_sizes=featmap_sizes)\n\n        mlvl_mask_targets = [\n            torch.cat(lvl_mask_targets, 0)\n            for lvl_mask_targets in zip(*pos_mask_targets)\n        ]\n\n        mlvl_pos_kernel_preds = []\n        for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds,\n                                                     zip(*pos_indexes)):\n            lvl_pos_kernel_preds = []\n            for img_lvl_kernel_preds, img_lvl_pos_indexes in zip(\n                    lvl_kernel_preds, lvl_pos_indexes):\n                img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view(\n                    img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes]\n                lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds)\n            mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds)\n\n        # make multilevel mlvl_mask_pred\n        mlvl_mask_preds = []\n        for lvl_pos_kernel_preds in mlvl_pos_kernel_preds:\n            lvl_mask_preds = []\n            for img_id, img_lvl_pos_kernel_pred in enumerate(\n                    lvl_pos_kernel_preds):\n                if img_lvl_pos_kernel_pred.size()[-1] == 0:\n                    continue\n                img_mask_feats = mask_feats[[img_id]]\n                h, w = img_mask_feats.shape[-2:]\n                num_kernel = img_lvl_pos_kernel_pred.shape[1]\n                img_lvl_mask_pred = F.conv2d(\n                    img_mask_feats,\n                    img_lvl_pos_kernel_pred.permute(1, 0).view(\n                        num_kernel, -1, self.dynamic_conv_size,\n                        self.dynamic_conv_size),\n                    stride=1).view(-1, h, w)\n                lvl_mask_preds.append(img_lvl_mask_pred)\n            if len(lvl_mask_preds) == 0:\n                lvl_mask_preds = None\n            else:\n                lvl_mask_preds = torch.cat(lvl_mask_preds, 0)\n            mlvl_mask_preds.append(lvl_mask_preds)\n        # dice loss\n        num_pos = 0\n        for img_pos_masks in pos_masks:\n            for lvl_img_pos_masks in img_pos_masks:\n                # Fix `Tensor` object has no attribute `count_nonzero()`\n                # in PyTorch 1.6, the type of `lvl_img_pos_masks`\n                # should be `torch.bool`.\n                num_pos += lvl_img_pos_masks.nonzero().numel()\n        loss_mask = []\n        for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds,\n                                                    mlvl_mask_targets):\n            if lvl_mask_preds is None:\n                continue\n            loss_mask.append(\n                self.loss_mask(\n                    lvl_mask_preds,\n                    lvl_mask_targets,\n                    reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n            loss_mask = mask_feats.sum() * 0\n\n        # cate\n        flatten_labels = [\n            torch.cat(\n                [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels])\n            for lvl_labels in zip(*labels)\n     
   ]\n        flatten_labels = torch.cat(flatten_labels)\n\n        flatten_cls_preds = [\n            lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes)\n            for lvl_cls_preds in mlvl_cls_preds\n        ]\n        flatten_cls_preds = torch.cat(flatten_cls_preds)\n\n        loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    def predict_by_feat(self, mlvl_kernel_preds: List[Tensor],\n                        mlvl_cls_scores: List[Tensor], mask_feats: Tensor,\n                        batch_img_metas: List[dict], **kwargs) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel\n                prediction. The kernel is used to generate instance\n                segmentation masks by dynamic convolution. Each element in the\n                list has shape\n                (batch_size, kernel_out_channels, num_grids, num_grids).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids, num_grids).\n            mask_feats (Tensor): Unified mask feature map used to generate\n                instance segmentation masks by dynamic convolution. Has shape\n                (batch_size, mask_out_channels, h, w).\n            batch_img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images. Each :obj:`InstanceData` usually contains the\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instances,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        num_levels = len(mlvl_cls_scores)\n        assert len(mlvl_kernel_preds) == len(mlvl_cls_scores)\n\n        for lvl in range(num_levels):\n            cls_scores = mlvl_cls_scores[lvl]\n            cls_scores = cls_scores.sigmoid()\n            local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1)\n            keep_mask = local_max[:, :, :-1, :-1] == cls_scores\n            cls_scores = cls_scores * keep_mask\n            mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1)\n\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            img_cls_pred = [\n                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)\n                for lvl in range(num_levels)\n            ]\n            img_mask_feats = mask_feats[[img_id]]\n            img_kernel_pred = [\n                mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view(\n                    -1, self.kernel_out_channels) for lvl in range(num_levels)\n            ]\n            img_cls_pred = torch.cat(img_cls_pred, dim=0)\n            img_kernel_pred = torch.cat(img_kernel_pred, dim=0)\n            result = self._predict_by_feat_single(\n                img_kernel_pred,\n                img_cls_pred,\n                img_mask_feats,\n                img_meta=batch_img_metas[img_id])\n            result_list.append(result)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                kernel_preds: 
Tensor,\n                                cls_scores: Tensor,\n                                mask_feats: Tensor,\n                                img_meta: dict,\n                                cfg: OptConfigType = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        mask results.\n\n        Args:\n            kernel_preds (Tensor): Dynamic kernel prediction of all points\n                in a single image, has shape\n                (num_points, kernel_out_channels).\n            cls_scores (Tensor): Classification scores of all points\n                in a single image, has shape (num_points, num_classes).\n            mask_feats (Tensor): Unified mask feature map of a single\n                image, has shape (1, mask_out_channels, h, w).\n            img_meta (dict): Meta information of corresponding image.\n            cfg (dict, optional): Config used in test phase.\n                Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of a single image.\n             It usually contains the following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instances,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(cls_scores, ori_shape):\n            \"\"\"Generate empty results.\"\"\"\n            results = InstanceData()\n            results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *ori_shape)\n            results.labels = cls_scores.new_ones(0)\n            results.bboxes = cls_scores.new_zeros(0, 4)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(kernel_preds) == len(cls_scores)\n\n        featmap_size = mask_feats.size()[-2:]\n\n        # overall info\n        h, w = img_meta['img_shape'][:2]\n        upsampled_size = (featmap_size[0] * self.mask_stride,\n                          featmap_size[1] * self.mask_stride)\n\n        # process.\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        if len(cls_scores) == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n\n        # cate_labels & kernel_preds\n        inds = score_mask.nonzero()\n        cls_labels = inds[:, 1]\n        kernel_preds = kernel_preds[inds[:, 0]]\n\n        # trans vector.\n        lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0)\n        strides = kernel_preds.new_ones(lvl_interval[-1])\n\n        strides[:lvl_interval[0]] *= self.strides[0]\n        for lvl in range(1, self.num_levels):\n            strides[lvl_interval[lvl -\n                                 1]:lvl_interval[lvl]] *= self.strides[lvl]\n        strides = strides[inds[:, 0]]\n\n        # mask encoding.\n        kernel_preds = kernel_preds.view(\n            kernel_preds.size(0), -1, self.dynamic_conv_size,\n            self.dynamic_conv_size)\n        mask_preds = F.conv2d(\n            mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid()\n        # mask.\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n        masks = masks[keep]\n        mask_preds = 
mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        if len(keep_inds) == 0:\n            return empty_results(cls_scores, img_meta['ori_shape'][:2])\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0),\n            size=upsampled_size,\n            mode='bilinear',\n            align_corners=False)[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds,\n            size=img_meta['ori_shape'][:2],\n            mode='bilinear',\n            align_corners=False).squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results = InstanceData()\n        results.masks = masks\n        results.labels = labels\n        results.scores = scores\n        # create an empty bbox in InstanceData to avoid bugs when\n        # calculating metrics.\n        results.bboxes = results.scores.new_zeros(len(scores), 4)\n\n        return results\n"
  },
  {
    "path": "mmdet/models/dense_heads/ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList\nfrom ..losses import smooth_l1_loss\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import multi_apply\nfrom .anchor_head import AnchorHead\n\n\n# TODO: add loss evaluator for SSD\n@MODELS.register_module()\nclass SSDHead(AnchorHead):\n    \"\"\"Implementation of `SSD head <https://arxiv.org/abs/1512.02325>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (Sequence[int]): Number of channels in the input feature\n            map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Defaults to 0.\n        feat_channels (int): Number of hidden channels when stacked_convs\n            > 0. Defaults to 256.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Defaults to False.\n        conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config conv layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config norm layer. Defaults to None.\n        act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct\n            and config activation layer. Defaults to None.\n        anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor\n            generator.\n        bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Defaults to False. 
It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of\n            anchor head.\n        test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of\n            anchor head.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], Optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(\n        self,\n        num_classes: int = 80,\n        in_channels: Sequence[int] = (512, 1024, 512, 256, 256, 256),\n        stacked_convs: int = 0,\n        feat_channels: int = 256,\n        use_depthwise: bool = False,\n        conv_cfg: Optional[ConfigType] = None,\n        norm_cfg: Optional[ConfigType] = None,\n        act_cfg: Optional[ConfigType] = None,\n        anchor_generator: ConfigType = dict(\n            type='SSDAnchorGenerator',\n            scale_major=False,\n            input_size=300,\n            strides=[8, 16, 32, 64, 100, 300],\n            ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n            basesize_ratio_range=(0.1, 0.9)),\n        bbox_coder: ConfigType = dict(\n            type='DeltaXYWHBBoxCoder',\n            clip_border=True,\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0],\n        ),\n        reg_decoded_bbox: bool = False,\n        train_cfg: Optional[ConfigType] = None,\n        test_cfg: Optional[ConfigType] = None,\n        init_cfg: MultiConfig = dict(\n            type='Xavier', layer='Conv2d', distribution='uniform', bias=0)\n    ) -> None:\n        super(AnchorHead, self).__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.stacked_convs = stacked_convs\n        self.feat_channels = feat_channels\n        self.use_depthwise = use_depthwise\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.cls_out_channels = num_classes + 1  # add background class\n        self.prior_generator = TASK_UTILS.build(anchor_generator)\n\n        # Usually the numbers of anchors for each level are the same\n        # except SSD detectors. 
So it is an int in the most dense\n        # heads but a list of int in SSDHead\n        self.num_base_priors = self.prior_generator.num_base_priors\n\n        self._init_layers()\n\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.use_sigmoid_cls = False\n        self.cls_focal_loss = False\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            if self.train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler(context=self)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        # TODO: Use registry to choose ConvModule type\n        conv = DepthwiseSeparableConvModule \\\n            if self.use_depthwise else ConvModule\n\n        for channel, num_base_priors in zip(self.in_channels,\n                                            self.num_base_priors):\n            cls_layers = []\n            reg_layers = []\n            in_channel = channel\n            # build stacked conv tower, not used in default ssd\n            for i in range(self.stacked_convs):\n                cls_layers.append(\n                    conv(\n                        in_channel,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_layers.append(\n                    conv(\n                        in_channel,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                in_channel = self.feat_channels\n            # SSD-Lite head\n            if self.use_depthwise:\n                cls_layers.append(\n                    ConvModule(\n                        in_channel,\n                        in_channel,\n                        3,\n                        padding=1,\n                        groups=in_channel,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_layers.append(\n                    ConvModule(\n                        in_channel,\n                        in_channel,\n                        3,\n                        padding=1,\n                        groups=in_channel,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n            cls_layers.append(\n                nn.Conv2d(\n                    in_channel,\n                    num_base_priors * self.cls_out_channels,\n                    kernel_size=1 if self.use_depthwise else 3,\n                    padding=0 if self.use_depthwise else 1))\n            reg_layers.append(\n                nn.Conv2d(\n                    in_channel,\n                    num_base_priors * 4,\n                    kernel_size=1 if 
self.use_depthwise else 3,\n                    padding=0 if self.use_depthwise else 1))\n            self.cls_convs.append(nn.Sequential(*cls_layers))\n            self.reg_convs.append(nn.Sequential(*reg_layers))\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple[list[Tensor], list[Tensor]]: A tuple of cls_scores list and\n            bbox_preds list.\n\n            - cls_scores (list[Tensor]): Classification scores for all scale \\\n            levels, each is a 4D-tensor, the channels number is \\\n            num_anchors * num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for all scale \\\n            levels, each is a 4D-tensor, the channels number is \\\n            num_anchors * 4.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for feat, reg_conv, cls_conv in zip(x, self.reg_convs, self.cls_convs):\n            cls_scores.append(cls_conv(feat))\n            bbox_preds.append(reg_conv(feat))\n        return cls_scores, bbox_preds\n\n    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                            anchor: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            bbox_weights: Tensor,\n                            avg_factor: int) -> Tuple[Tensor, Tensor]:\n        \"\"\"Compute loss of a single image.\n\n        Args:\n            cls_score (Tensor): Box scores for each image,\n                has shape (num_total_anchors, num_classes).\n            bbox_pred (Tensor): Box energies / deltas for each image\n                level with shape (num_total_anchors, 4).\n            anchor (Tensor): Box reference for each scale level with shape\n                (num_total_anchors, 4).\n            labels (Tensor): Labels of each anchor with shape\n                (num_total_anchors,).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (num_total_anchors,).\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                with shape (num_total_anchors, 4).\n            bbox_weights (Tensor): BBox regression loss weights of each anchor\n                with shape (num_total_anchors, 4).\n            avg_factor (int): Average factor that is used to average\n                the loss. When using a sampling method, avg_factor is usually\n                the sum of positive and negative priors. 
When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one\n            feature map.\n        \"\"\"\n\n        loss_cls_all = F.cross_entropy(\n            cls_score, labels, reduction='none') * label_weights\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(\n            as_tuple=False).reshape(-1)\n        neg_inds = (labels == self.num_classes).nonzero(\n            as_tuple=False).view(-1)\n\n        num_pos_samples = pos_inds.size(0)\n        num_neg_samples = self.train_cfg['neg_pos_ratio'] * num_pos_samples\n        if num_neg_samples > neg_inds.size(0):\n            num_neg_samples = neg_inds.size(0)\n        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)\n        loss_cls_pos = loss_cls_all[pos_inds].sum()\n        loss_cls_neg = topk_loss_cls_neg.sum()\n        loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor\n\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)\n\n        loss_bbox = smooth_l1_loss(\n            bbox_pred,\n            bbox_targets,\n            bbox_weights,\n            beta=self.train_cfg['smoothl1_beta'],\n            avg_factor=avg_factor)\n        return loss_cls[None], loss_bbox\n\n    def loss_by_feat(\n        self,\n        cls_scores: List[Tensor],\n        bbox_preds: List[Tensor],\n        batch_gt_instances: InstanceList,\n        batch_img_metas: List[dict],\n        batch_gt_instances_ignore: OptInstanceList = None\n    ) -> Dict[str, List[Tensor]]:\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, list[Tensor]]: A dictionary of loss components. 
the dict\n            has components below:\n\n            - loss_cls (list[Tensor]): A list containing each feature map \\\n            classification loss.\n            - loss_bbox (list[Tensor]): A list containing each feature map \\\n            regression loss.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            unmap_outputs=True)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor) = cls_reg_targets\n\n        num_images = len(batch_img_metas)\n        all_cls_scores = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(\n                num_images, -1, self.cls_out_channels) for s in cls_scores\n        ], 1)\n        all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n        all_label_weights = torch.cat(label_weights_list,\n                                      -1).view(num_images, -1)\n        all_bbox_preds = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n            for b in bbox_preds\n        ], -2)\n        all_bbox_targets = torch.cat(bbox_targets_list,\n                                     -2).view(num_images, -1, 4)\n        all_bbox_weights = torch.cat(bbox_weights_list,\n                                     -2).view(num_images, -1, 4)\n\n        # concat all level anchors to a single tensor\n        all_anchors = []\n        for i in range(num_images):\n            all_anchors.append(torch.cat(anchor_list[i]))\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_by_feat_single,\n            all_cls_scores,\n            all_bbox_preds,\n            all_anchors,\n            all_labels,\n            all_label_weights,\n            all_bbox_targets,\n            all_bbox_weights,\n            avg_factor=avg_factor)\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n"
  },
  {
    "path": "mmdet/models/dense_heads/tood_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmcv.ops import deform_conv2d\nfrom mmengine import MessageHub\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import bias_init_with_prob, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import distance2bbox\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,\n                     sigmoid_geometric_mean, unmap)\nfrom .atss_head import ATSSHead\n\n\nclass TaskDecomposition(nn.Module):\n    \"\"\"Task decomposition module in task-aligned predictor of TOOD.\n\n    Args:\n        feat_channels (int): Number of feature channels in TOOD head.\n        stacked_convs (int): Number of conv layers in TOOD head.\n        la_down_rate (int): Downsample rate of layer attention.\n            Defaults to 8.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, optional):  Config dict for\n        normalization layer. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 feat_channels: int,\n                 stacked_convs: int,\n                 la_down_rate: int = 8,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None) -> None:\n        super().__init__()\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.in_channels = self.feat_channels * self.stacked_convs\n        self.norm_cfg = norm_cfg\n        self.layer_attention = nn.Sequential(\n            nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(\n                self.in_channels // la_down_rate,\n                self.stacked_convs,\n                1,\n                padding=0), nn.Sigmoid())\n\n        self.reduction_conv = ConvModule(\n            self.in_channels,\n            self.feat_channels,\n            1,\n            stride=1,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            bias=norm_cfg is None)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the parameters.\"\"\"\n        for m in self.layer_attention.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.001)\n        normal_init(self.reduction_conv.conv, std=0.01)\n\n    def forward(self,\n                feat: Tensor,\n                avg_feat: Optional[Tensor] = None) -> Tensor:\n        \"\"\"Forward function of task decomposition module.\"\"\"\n        b, c, h, w = feat.shape\n        if avg_feat is None:\n            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))\n        weight = self.layer_attention(avg_feat)\n\n        # here we first compute the product between layer attention weight and\n        # conv weight, and then compute the convolution between new conv weight\n        # and feature map, in order to save memory and FLOPs.\n        conv_weight = weight.reshape(\n            b, 1, self.stacked_convs,\n           
 1) * self.reduction_conv.conv.weight.reshape(\n                1, self.feat_channels, self.stacked_convs, self.feat_channels)\n        conv_weight = conv_weight.reshape(b, self.feat_channels,\n                                          self.in_channels)\n        feat = feat.reshape(b, self.in_channels, h * w)\n        feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h,\n                                                    w)\n        if self.norm_cfg is not None:\n            feat = self.reduction_conv.norm(feat)\n        feat = self.reduction_conv.activate(feat)\n\n        return feat\n\n\n@MODELS.register_module()\nclass TOODHead(ATSSHead):\n    \"\"\"TOODHead used in `TOOD: Task-aligned One-stage Object Detection.\n\n    <https://arxiv.org/abs/2108.07755>`_.\n\n    TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment\n    Learning (TAL).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        num_dcn (int): Number of deformable convolution in the head.\n            Defaults to 0.\n        anchor_type (str): If set to ``anchor_free``, the head will use centers\n            to regress bboxes. If set to ``anchor_based``, the head will\n            regress bboxes based on anchors. Defaults to ``anchor_free``.\n        initial_loss_cls (:obj:`ConfigDict` or dict): Config of initial loss.\n\n    Example:\n        >>> self = TOODHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred = self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 num_dcn: int = 0,\n                 anchor_type: str = 'anchor_free',\n                 initial_loss_cls: ConfigType = dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     activated=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 **kwargs) -> None:\n        assert anchor_type in ['anchor_free', 'anchor_based']\n        self.num_dcn = num_dcn\n        self.anchor_type = anchor_type\n        super().__init__(\n            num_classes=num_classes, in_channels=in_channels, **kwargs)\n\n        if self.train_cfg:\n            self.initial_epoch = self.train_cfg['initial_epoch']\n            self.initial_assigner = TASK_UTILS.build(\n                self.train_cfg['initial_assigner'])\n            self.initial_loss_cls = MODELS.build(initial_loss_cls)\n            self.assigner = self.initial_assigner\n            self.alignment_assigner = TASK_UTILS.build(\n                self.train_cfg['assigner'])\n            self.alpha = self.train_cfg['alpha']\n            self.beta = self.train_cfg['beta']\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.inter_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            if i < self.num_dcn:\n                conv_cfg = dict(type='DCNv2', deform_groups=4)\n            else:\n                conv_cfg = self.conv_cfg\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.inter_convs.append(\n                ConvModule(\n                    chn,\n                    
self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.cls_decomp = TaskDecomposition(self.feat_channels,\n                                            self.stacked_convs,\n                                            self.stacked_convs * 8,\n                                            self.conv_cfg, self.norm_cfg)\n        self.reg_decomp = TaskDecomposition(self.feat_channels,\n                                            self.stacked_convs,\n                                            self.stacked_convs * 8,\n                                            self.conv_cfg, self.norm_cfg)\n\n        self.tood_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.tood_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n        self.cls_prob_module = nn.Sequential(\n            nn.Conv2d(self.feat_channels * self.stacked_convs,\n                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),\n            nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1))\n        self.reg_offset_module = nn.Sequential(\n            nn.Conv2d(self.feat_channels * self.stacked_convs,\n                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),\n            nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1))\n\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        bias_cls = bias_init_with_prob(0.01)\n        for m in self.inter_convs:\n            normal_init(m.conv, std=0.01)\n        for m in self.cls_prob_module:\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.01)\n        for m in self.reg_offset_module:\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.001)\n        normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls)\n\n        self.cls_decomp.init_weights()\n        self.reg_decomp.init_weights()\n\n        normal_init(self.tood_cls, std=0.01, bias=bias_cls)\n        normal_init(self.tood_reg, std=0.01)\n\n    def forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Decoded box for all scale levels,\n                    each is a 4D-tensor, the channels number is\n                    num_anchors * 4. 
In [tl_x, tl_y, br_x, br_y] format.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for idx, (x, scale, stride) in enumerate(\n                zip(feats, self.scales, self.prior_generator.strides)):\n            b, c, h, w = x.shape\n            anchor = self.prior_generator.single_level_grid_priors(\n                (h, w), idx, device=x.device)\n            anchor = torch.cat([anchor for _ in range(b)])\n            # extract task interactive features\n            inter_feats = []\n            for inter_conv in self.inter_convs:\n                x = inter_conv(x)\n                inter_feats.append(x)\n            feat = torch.cat(inter_feats, 1)\n\n            # task decomposition\n            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))\n            cls_feat = self.cls_decomp(feat, avg_feat)\n            reg_feat = self.reg_decomp(feat, avg_feat)\n\n            # cls prediction and alignment\n            cls_logits = self.tood_cls(cls_feat)\n            cls_prob = self.cls_prob_module(feat)\n            cls_score = sigmoid_geometric_mean(cls_logits, cls_prob)\n\n            # reg prediction and alignment\n            if self.anchor_type == 'anchor_free':\n                reg_dist = scale(self.tood_reg(reg_feat).exp()).float()\n                reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)\n                reg_bbox = distance2bbox(\n                    self.anchor_center(anchor) / stride[0],\n                    reg_dist).reshape(b, h, w, 4).permute(0, 3, 1,\n                                                          2)  # (b, c, h, w)\n            elif self.anchor_type == 'anchor_based':\n                reg_dist = scale(self.tood_reg(reg_feat)).float()\n                reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)\n                reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape(\n                    b, h, w, 4).permute(0, 3, 1, 2) / stride[0]\n            else:\n                raise NotImplementedError(\n                    f'Unknown anchor type: {self.anchor_type}.'\n                    f'Please use `anchor_free` or `anchor_based`.')\n            reg_offset = self.reg_offset_module(feat)\n            bbox_pred = self.deform_sampling(reg_bbox.contiguous(),\n                                             reg_offset.contiguous())\n\n            # After deform_sampling, some boxes will become invalid (The\n            # left-top point is at the right or bottom of the right-bottom\n            # point), which will make the GIoULoss negative.\n            invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \\\n                               (bbox_pred[:, [1]] > bbox_pred[:, [3]])\n            invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred)\n            bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred)\n\n            cls_scores.append(cls_score)\n            bbox_preds.append(bbox_pred)\n        return tuple(cls_scores), tuple(bbox_preds)\n\n    def deform_sampling(self, feat: Tensor, offset: Tensor) -> Tensor:\n        \"\"\"Sampling the feature x according to offset.\n\n        Args:\n            feat (Tensor): Feature\n            offset (Tensor): Spatial offset for feature sampling\n        \"\"\"\n        # it is an equivalent implementation of bilinear interpolation\n        b, c, h, w = feat.shape\n        weight = feat.new_ones(c, 1, 1, 1)\n        y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)\n        return y\n\n    def anchor_center(self, anchors: Tensor) -> Tensor:\n        \"\"\"Get 
anchor centers from anchors.\n\n        Args:\n            anchors (Tensor): Anchor list with shape (N, 4), \"xyxy\" format.\n\n        Returns:\n            Tensor: Anchor centers with shape (N, 2), \"xy\" format.\n        \"\"\"\n        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n        return torch.stack([anchors_cx, anchors_cy], dim=-1)\n\n    def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n                            bbox_pred: Tensor, labels: Tensor,\n                            label_weights: Tensor, bbox_targets: Tensor,\n                            alignment_metrics: Tensor,\n                            stride: Tuple[int, int]) -> dict:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Decoded bboxes for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors).\n            bbox_targets (Tensor): BBox regression targets of each anchor with\n                shape (N, num_total_anchors, 4).\n            alignment_metrics (Tensor): Alignment metrics with shape\n                (N, num_total_anchors).\n            stride (Tuple[int, int]): Downsample stride of the feature map.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        alignment_metrics = alignment_metrics.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        targets = labels if self.epoch < self.initial_epoch else (\n            labels, alignment_metrics)\n        cls_loss_func = self.initial_loss_cls \\\n            if self.epoch < self.initial_epoch else self.loss_cls\n\n        loss_cls = cls_loss_func(\n            cls_score, targets, label_weights, avg_factor=1.0)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n\n            pos_decode_bbox_pred = pos_bbox_pred\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n\n            # regression loss\n            pos_bbox_weight = self.centerness_target(\n                pos_anchors, pos_bbox_targets\n            ) if self.epoch < self.initial_epoch else alignment_metrics[\n                pos_inds]\n\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n  
              pos_decode_bbox_targets,\n                weight=pos_bbox_weight,\n                avg_factor=1.0)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            pos_bbox_weight = bbox_targets.new_tensor(0.)\n\n        return loss_cls, loss_bbox, alignment_metrics.sum(\n        ), pos_bbox_weight.sum()\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Decoded box for each scale\n                level with shape (N, num_anchors * 4, H, W) in\n                [tl_x, tl_y, br_x, br_y] format.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        flatten_cls_scores = torch.cat([\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ], 1)\n        flatten_bbox_preds = torch.cat([\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0]\n            for bbox_pred, stride in zip(bbox_preds,\n                                         self.prior_generator.strides)\n        ], 1)\n\n        cls_reg_targets = self.get_targets(\n            flatten_cls_scores,\n            flatten_bbox_preds,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         alignment_metrics_list) = cls_reg_targets\n\n        losses_cls, losses_bbox, \\\n            cls_avg_factors, bbox_avg_factors = multi_apply(\n                self.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                alignment_metrics_list,\n                self.prior_generator.strides)\n\n        cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()\n        losses_cls = list(map(lambda x: x / cls_avg_factor, 
losses_cls))\n\n        bbox_avg_factor = reduce_mean(\n            sum(bbox_avg_factors)).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: Optional[ConfigDict] = None,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (:obj:`ConfigDict`, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. 
If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n\n        cfg = self.test_cfg if cfg is None else cfg\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for cls_score, bbox_pred, priors, stride in zip(\n                cls_score_list, bbox_pred_list, mlvl_priors,\n                self.prior_generator.strides):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0]\n            scores = cls_score.permute(1, 2,\n                                       0).reshape(-1, self.cls_out_channels)\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bboxes = filtered_results['bbox_pred']\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bboxes)\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n\n    def get_targets(self,\n                    cls_scores: List[List[Tensor]],\n                    bbox_preds: List[List[Tensor]],\n                    anchor_list: List[List[Tensor]],\n                    valid_flag_list: List[List[Tensor]],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            cls_scores (list[list[Tensor]]): Classification predictions of\n                images, a 3D-Tensor with shape [num_imgs, num_priors,\n                num_classes].\n            bbox_preds (list[list[Tensor]]): Decoded bboxes predictions of one\n                image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in\n                [tl_x, tl_y, br_x, br_y] format.\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: a tuple containing learning targets.\n\n                - anchors_list (list[list[Tensor]]): Anchors of each level.\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - norm_alignment_metrics_list (list[Tensor]): Normalized\n                  alignment metrics of each level.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        # anchor_list: list(b * [-1, 4])\n\n        # get epoch information from message hub\n        message_hub = MessageHub.get_current_instance()\n        self.epoch = message_hub.get_info('epoch')\n\n        if self.epoch < self.initial_epoch:\n            (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n             all_bbox_weights, pos_inds_list, neg_inds_list,\n             sampling_result) = multi_apply(\n                 super()._get_targets_single,\n                 anchor_list,\n                 valid_flag_list,\n                 num_level_anchors_list,\n                 batch_gt_instances,\n                 batch_img_metas,\n                 batch_gt_instances_ignore,\n                 unmap_outputs=unmap_outputs)\n            all_assign_metrics = [\n                weight[..., 0] for weight in all_bbox_weights\n            ]\n        else:\n            (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n             all_assign_metrics) = multi_apply(\n                 self._get_targets_single,\n                 cls_scores,\n                 bbox_preds,\n                 anchor_list,\n                 valid_flag_list,\n                 
batch_gt_instances,\n                 batch_img_metas,\n                 batch_gt_instances_ignore,\n                 unmap_outputs=unmap_outputs)\n\n        # split targets to a list w.r.t. multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        norm_alignment_metrics_list = images_to_levels(all_assign_metrics,\n                                                       num_level_anchors)\n\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, norm_alignment_metrics_list)\n\n    def _get_targets_single(self,\n                            cls_scores: Tensor,\n                            bbox_preds: Tensor,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            cls_scores (Tensor): Box scores for each image.\n            bbox_preds (Tensor): Box energies / deltas for each image.\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                anchors (Tensor): All anchors in the image with shape (N, 4).\n                labels (Tensor): Labels of all anchors in the image with shape\n                    (N,).\n                label_weights (Tensor): Label weights of all anchor in the\n                    image with shape (N,).\n                bbox_targets (Tensor): BBox targets of all anchors in the\n                    image with shape (N, 4).\n                norm_alignment_metrics (Tensor): Normalized alignment metrics\n                    of all priors in the image with shape (N,).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n        pred_instances = InstanceData(\n            priors=anchors,\n            scores=cls_scores[inside_flags, :],\n            bboxes=bbox_preds[inside_flags, :])\n        assign_result = self.alignment_assigner.assign(pred_instances,\n                                                       gt_instances,\n                                                       gt_instances_ignore,\n                                                       self.alpha, self.beta)\n        assign_ious = assign_result.max_overlaps\n        assign_metrics = assign_result.assign_metrics\n\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n        norm_alignment_metrics = anchors.new_zeros(\n            num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            # point-based\n            pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        class_assigned_gt_inds = torch.unique(\n            sampling_result.pos_assigned_gt_inds)\n        for gt_inds in class_assigned_gt_inds:\n            gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds ==\n                                     gt_inds]\n            
pos_alignment_metrics = assign_metrics[gt_class_inds]\n            pos_ious = assign_ious[gt_class_inds]\n            pos_norm_alignment_metrics = pos_alignment_metrics / (\n                pos_alignment_metrics.max() + 10e-8) * pos_ious.max()\n            norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            norm_alignment_metrics = unmap(norm_alignment_metrics,\n                                           num_total_anchors, inside_flags)\n        return (anchors, labels, label_weights, bbox_targets,\n                norm_alignment_metrics)\n"
  },
  {
    "path": "mmdet/models/dense_heads/vfnet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmcv.ops import DeformConv2d\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig,\n                         OptInstanceList, RangeType, reduce_mean)\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import multi_apply\nfrom .atss_head import ATSSHead\nfrom .fcos_head import FCOSHead\n\nINF = 1e8\n\n\n@MODELS.register_module()\nclass VFNetHead(ATSSHead, FCOSHead):\n    \"\"\"Head of `VarifocalNet (VFNet): An IoU-aware Dense Object\n    Detector.<https://arxiv.org/abs/2008.13367>`_.\n\n    The VFNet predicts IoU-aware classification scores which mix the\n    object presence confidence and object localization accuracy as the\n    detection score. It is built on the FCOS architecture and uses ATSS\n    for defining positive/negative training examples. The VFNet is trained\n    with Varifocal Loss and empolys star-shaped deformable convolution to\n    extract features for a bbox.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling. Defaults to False.\n        center_sample_radius (float): Radius of center sampling. Defaults to 1.5.\n        sync_num_pos (bool): If true, synchronize the number of positive\n            examples across GPUs. Defaults to True\n        gradient_mul (float): The multiplier to gradients from bbox refinement\n            and recognition. Defaults to 0.1.\n        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or\n            'stride'. Defaults to reg_denom\n        loss_cls_fl (:obj:`ConfigDict` or dict): Config of focal loss.\n        use_vfl (bool): If true, use varifocal loss for training.\n            Defaults to True.\n        loss_cls (:obj:`ConfigDict` or dict): Config of varifocal loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss,\n            GIoU Loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization\n            refinement loss, GIoU Loss.\n        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and\n            config norm layer. Defaults to norm_cfg=dict(type='GN',\n            num_groups=32, requires_grad=True).\n        use_atss (bool): If true, use ATSS to define positive/negative\n            examples. 
Defaults to True.\n        anchor_generator (:obj:`ConfigDict` or dict): Config of anchor\n            generator for ATSS.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict.\n\n    Example:\n        >>> self = VFNetHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"  # noqa: E501\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256),\n                                              (256, 512), (512, INF)),\n                 center_sampling: bool = False,\n                 center_sample_radius: float = 1.5,\n                 sync_num_pos: bool = True,\n                 gradient_mul: float = 0.1,\n                 bbox_norm_type: str = 'reg_denom',\n                 loss_cls_fl: ConfigType = dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 use_vfl: bool = True,\n                 loss_cls: ConfigType = dict(\n                     type='VarifocalLoss',\n                     use_sigmoid=True,\n                     alpha=0.75,\n                     gamma=2.0,\n                     iou_weighted=True,\n                     loss_weight=1.0),\n                 loss_bbox: ConfigType = dict(\n                     type='GIoULoss', loss_weight=1.5),\n                 loss_bbox_refine: ConfigType = dict(\n                     type='GIoULoss', loss_weight=2.0),\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 use_atss: bool = True,\n                 reg_decoded_bbox: bool = True,\n                 anchor_generator: ConfigType = dict(\n                     type='AnchorGenerator',\n                     ratios=[1.0],\n                     octave_base_scale=8,\n                     scales_per_octave=1,\n                     center_offset=0.0,\n                     strides=[8, 16, 32, 64, 128]),\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='vfnet_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs) -> None:\n        # dcn base offsets, adapted from reppoints_head.py\n        self.num_dconv_points = 9\n        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))\n        self.dcn_pad = int((self.dcn_kernel - 1) / 2)\n        dcn_base = np.arange(-self.dcn_pad,\n                             self.dcn_pad + 1).astype(np.float64)\n        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)\n        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)\n        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(\n            (-1))\n        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)\n\n        super(FCOSHead, self).__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            norm_cfg=norm_cfg,\n            init_cfg=init_cfg,\n            **kwargs)\n        
self.regress_ranges = regress_ranges\n        self.reg_denoms = [\n            regress_range[-1] for regress_range in regress_ranges\n        ]\n        self.reg_denoms[-1] = self.reg_denoms[-2] * 2\n        self.center_sampling = center_sampling\n        self.center_sample_radius = center_sample_radius\n        self.sync_num_pos = sync_num_pos\n        self.bbox_norm_type = bbox_norm_type\n        self.gradient_mul = gradient_mul\n        self.use_vfl = use_vfl\n        if self.use_vfl:\n            self.loss_cls = MODELS.build(loss_cls)\n        else:\n            self.loss_cls = MODELS.build(loss_cls_fl)\n        self.loss_bbox = MODELS.build(loss_bbox)\n        self.loss_bbox_refine = MODELS.build(loss_bbox_refine)\n\n        # for getting ATSS targets\n        self.use_atss = use_atss\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n\n        self.anchor_center_offset = anchor_generator['center_offset']\n\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            if self.train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], default_args=dict(context=self))\n            else:\n                self.sampler = PseudoSampler()\n        # only be used in `get_atss_targets` when `use_atss` is True\n        self.atss_prior_generator = TASK_UTILS.build(anchor_generator)\n\n        self.fcos_prior_generator = MlvlPointGenerator(\n            anchor_generator['strides'],\n            self.anchor_center_offset if self.use_atss else 0.5)\n\n        # In order to reuse the `get_bboxes` in `BaseDenseHead.\n        # Only be used in testing phase.\n        self.prior_generator = self.fcos_prior_generator\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        super(FCOSHead, self)._init_cls_convs()\n        super(FCOSHead, self)._init_reg_convs()\n        self.relu = nn.ReLU()\n        self.vfnet_reg_conv = ConvModule(\n            self.feat_channels,\n            self.feat_channels,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            bias=self.conv_bias)\n        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n        self.vfnet_reg_refine_dconv = DeformConv2d(\n            self.feat_channels,\n            self.feat_channels,\n            self.dcn_kernel,\n            1,\n            padding=self.dcn_pad)\n        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n        self.vfnet_cls_dconv = DeformConv2d(\n            self.feat_channels,\n            self.feat_channels,\n            self.dcn_kernel,\n            1,\n            padding=self.dcn_pad)\n        self.vfnet_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple:\n\n            - cls_scores (list[Tensor]): Box 
iou-aware scores for each scale\n              level, each is a 4D-tensor, the channel number is\n              num_points * num_classes.\n            - bbox_preds (list[Tensor]): Box offsets for each\n              scale level, each is a 4D-tensor, the channel number is\n              num_points * 4.\n            - bbox_preds_refine (list[Tensor]): Refined Box offsets for\n              each scale level, each is a 4D-tensor, the channel\n              number is num_points * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, x, self.scales,\n                           self.scales_refine, self.strides, self.reg_denoms)\n\n    def forward_single(self, x: Tensor, scale: Scale, scale_refine: Scale,\n                       stride: int, reg_denom: int) -> tuple:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to\n                resize the refined bbox prediction.\n            stride (int): The corresponding stride for feature maps,\n                used to normalize the bbox prediction when\n                bbox_norm_type = 'stride'.\n            reg_denom (int): The corresponding regression range for feature\n                maps, only used to normalize the bbox prediction when\n                bbox_norm_type = 'reg_denom'.\n\n        Returns:\n            tuple: iou-aware cls scores for each box, bbox predictions and\n            refined bbox predictions of input feature maps.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n\n        # predict the bbox_pred of different level\n        reg_feat_init = self.vfnet_reg_conv(reg_feat)\n        if self.bbox_norm_type == 'reg_denom':\n            bbox_pred = scale(\n                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom\n        elif self.bbox_norm_type == 'stride':\n            bbox_pred = scale(\n                self.vfnet_reg(reg_feat_init)).float().exp() * stride\n        else:\n            raise NotImplementedError\n\n        # compute star deformable convolution offsets\n        # converting dcn_offset to reg_feat.dtype thus VFNet can be\n        # trained with FP16\n        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,\n                                          stride).to(reg_feat.dtype)\n\n        # refine the bbox_pred\n        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))\n        bbox_pred_refine = scale_refine(\n            self.vfnet_reg_refine(reg_feat)).float().exp()\n        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()\n\n        # predict the iou-aware cls score\n        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))\n        cls_score = self.vfnet_cls(cls_feat)\n\n        if self.training:\n            return cls_score, bbox_pred, bbox_pred_refine\n        else:\n            return cls_score, bbox_pred_refine\n\n    def star_dcn_offset(self, bbox_pred: Tensor, gradient_mul: float,\n                        stride: int) -> Tensor:\n        \"\"\"Compute the star deformable conv offsets.\n\n        Args:\n            bbox_pred (Tensor): Predicted bbox distance 
offsets (l, r, t, b).\n            gradient_mul (float): Gradient multiplier.\n            stride (int): The corresponding stride for feature maps,\n                used to project the bbox onto the feature map.\n\n        Returns:\n            Tensor: The offsets for deformable convolution.\n        \"\"\"\n        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)\n        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \\\n            gradient_mul * bbox_pred\n        # map to the feature map scale\n        bbox_pred_grad_mul = bbox_pred_grad_mul / stride\n        N, C, H, W = bbox_pred.size()\n\n        x1 = bbox_pred_grad_mul[:, 0, :, :]\n        y1 = bbox_pred_grad_mul[:, 1, :, :]\n        x2 = bbox_pred_grad_mul[:, 2, :, :]\n        y2 = bbox_pred_grad_mul[:, 3, :, :]\n        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(\n            N, 2 * self.num_dconv_points, H, W)\n        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2\n        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2\n        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2\n        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset\n\n        return dcn_offset\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            bbox_preds_refine: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box offsets for each\n                scale level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            bbox_preds_refine (list[Tensor]): Refined Box offsets for\n                each scale level, each is a 4D-tensor, the channel\n                number is num_points * 4.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.fcos_prior_generator.grid_priors(\n            featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)\n        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(\n            cls_scores,\n            all_level_points,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and bbox_preds_refine\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3,\n                              1).reshape(-1,\n                                         self.cls_out_channels).contiguous()\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n            for bbox_pred in bbox_preds\n        ]\n        flatten_bbox_preds_refine = [\n            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n            for bbox_pred_refine in bbox_preds_refine\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = torch.where(\n            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]\n        num_pos = len(pos_inds)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]\n        pos_labels = flatten_labels[pos_inds]\n\n        # sync num_pos across all gpus\n        if self.sync_num_pos:\n            num_pos_avg_per_gpu = reduce_mean(\n                pos_inds.new_tensor(num_pos).float()).item()\n            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)\n        else:\n            num_pos_avg_per_gpu = num_pos\n\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_points = flatten_points[pos_inds]\n\n        pos_decoded_bbox_preds = self.bbox_coder.decode(\n            pos_points, pos_bbox_preds)\n        pos_decoded_target_preds = self.bbox_coder.decode(\n            pos_points, pos_bbox_targets)\n        iou_targets_ini = bbox_overlaps(\n            pos_decoded_bbox_preds,\n            pos_decoded_target_preds.detach(),\n            is_aligned=True).clamp(min=1e-6)\n        bbox_weights_ini = iou_targets_ini.clone().detach()\n        bbox_avg_factor_ini = reduce_mean(\n            bbox_weights_ini.sum()).clamp_(min=1).item()\n\n        pos_decoded_bbox_preds_refine = \\\n            self.bbox_coder.decode(pos_points, pos_bbox_preds_refine)\n        iou_targets_rf = bbox_overlaps(\n            pos_decoded_bbox_preds_refine,\n            
pos_decoded_target_preds.detach(),\n            is_aligned=True).clamp(min=1e-6)\n        bbox_weights_rf = iou_targets_rf.clone().detach()\n        bbox_avg_factor_rf = reduce_mean(\n            bbox_weights_rf.sum()).clamp_(min=1).item()\n\n        if num_pos > 0:\n            loss_bbox = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds.detach(),\n                weight=bbox_weights_ini,\n                avg_factor=bbox_avg_factor_ini)\n\n            loss_bbox_refine = self.loss_bbox_refine(\n                pos_decoded_bbox_preds_refine,\n                pos_decoded_target_preds.detach(),\n                weight=bbox_weights_rf,\n                avg_factor=bbox_avg_factor_rf)\n\n            # build IoU-aware cls_score targets\n            if self.use_vfl:\n                pos_ious = iou_targets_rf.clone().detach()\n                cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n                cls_iou_targets[pos_inds, pos_labels] = pos_ious\n        else:\n            loss_bbox = pos_bbox_preds.sum() * 0\n            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0\n            if self.use_vfl:\n                cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n\n        if self.use_vfl:\n            loss_cls = self.loss_cls(\n                flatten_cls_scores,\n                cls_iou_targets,\n                avg_factor=num_pos_avg_per_gpu)\n        else:\n            loss_cls = self.loss_cls(\n                flatten_cls_scores,\n                flatten_labels,\n                weight=label_weights,\n                avg_factor=num_pos_avg_per_gpu)\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            loss_bbox_rf=loss_bbox_refine)\n\n    def get_targets(\n            self,\n            cls_scores: List[Tensor],\n            mlvl_points: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:\n        \"\"\"A wrapper for computing ATSS and FCOS targets for points in multiple\n        images.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level with shape (N, num_points * num_classes, H, W).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            tuple:\n\n            - labels_list (list[Tensor]): Labels of each level.\n            - label_weights (Tensor/None): Label weights of all levels.\n            - bbox_targets_list (list[Tensor]): Regression targets of each\n              level, (l, t, r, b).\n            - bbox_weights (Tensor/None): Bbox weights of all levels.\n        \"\"\"\n        if self.use_atss:\n            return self.get_atss_targets(cls_scores, mlvl_points,\n                                         batch_gt_instances, batch_img_metas,\n                                         batch_gt_instances_ignore)\n        else:\n            self.norm_on_bbox = False\n            return self.get_fcos_targets(mlvl_points, batch_gt_instances)\n\n    def _get_targets_single(self, *args, **kwargs):\n        \"\"\"Avoid ambiguity in multiple inheritance.\"\"\"\n        if self.use_atss:\n            return ATSSHead._get_targets_single(self, *args, **kwargs)\n        else:\n            return FCOSHead._get_targets_single(self, *args, **kwargs)\n\n    def get_fcos_targets(self, points: List[Tensor],\n                         batch_gt_instances: InstanceList) -> tuple:\n        \"\"\"Compute FCOS regression and classification targets for points in\n        multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple:\n\n            - labels (list[Tensor]): Labels of each level.\n            - label_weights: None, to be compatible with ATSS targets.\n            - bbox_targets (list[Tensor]): BBox targets of each level.\n            - bbox_weights: None, to be compatible with ATSS targets.\n        \"\"\"\n        labels, bbox_targets = FCOSHead.get_targets(self, points,\n                                                    batch_gt_instances)\n        label_weights = None\n        bbox_weights = None\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_anchors(self,\n                    featmap_sizes: List[Tuple],\n                    batch_img_metas: List[dict],\n                    device: str = 'cuda') -> tuple:\n        \"\"\"Get anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            batch_img_metas (list[dict]): Image meta info.\n            device (str): Device for returned tensors\n\n        Returns:\n            tuple:\n\n            - anchor_list (list[Tensor]): Anchors of each image.\n            - valid_flag_list (list[Tensor]): Valid flags of each image.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # anchors for one time\n        multi_level_anchors = self.atss_prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level anchors\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            multi_level_flags = self.atss_prior_generator.valid_flags(\n              
  featmap_sizes, img_meta['pad_shape'], device=device)\n            valid_flag_list.append(multi_level_flags)\n\n        return anchor_list, valid_flag_list\n\n    def get_atss_targets(\n            self,\n            cls_scores: List[Tensor],\n            mlvl_points: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:\n        \"\"\"A wrapper for computing ATSS targets for points in multiple images.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level with shape (N, num_points * num_classes, H, W).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            tuple:\n\n            - labels_list (list[Tensor]): Labels of each level.\n            - label_weights (Tensor): Label weights of all levels.\n            - bbox_targets_list (list[Tensor]): Regression targets of each\n              level, (l, t, r, b).\n            - bbox_weights (Tensor): Bbox weights of all levels.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(\n            featmap_sizes\n        ) == self.atss_prior_generator.num_levels == \\\n            self.fcos_prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = ATSSHead.get_targets(\n            self,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore,\n            unmap_outputs=True)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n\n        bbox_targets_list = [\n            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list\n        ]\n\n        num_imgs = len(batch_img_metas)\n        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format\n        bbox_targets_list = self.transform_bbox_targets(\n            bbox_targets_list, mlvl_points, num_imgs)\n\n        labels_list = [labels.reshape(-1) for labels in labels_list]\n        label_weights_list = [\n            label_weights.reshape(-1) for label_weights in label_weights_list\n        ]\n        bbox_weights_list = [\n            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list\n        ]\n        label_weights = torch.cat(label_weights_list)\n        bbox_weights = torch.cat(bbox_weights_list)\n        return labels_list, label_weights, bbox_targets_list, bbox_weights\n\n    def transform_bbox_targets(self, decoded_bboxes: List[Tensor],\n                               mlvl_points: List[Tensor],\n                               num_imgs: 
int) -> List[Tensor]:\n        \"\"\"Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.\n\n        Args:\n            decoded_bboxes (list[Tensor]): Regression targets of each level,\n                in the form of (x1, y1, x2, y2).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            num_imgs (int): the number of images in a batch.\n\n        Returns:\n            bbox_targets (list[Tensor]): Regression targets of each level in\n                the form of (l, t, r, b).\n        \"\"\"\n        # TODO: Re-implemented in Class PointCoder\n        assert len(decoded_bboxes) == len(mlvl_points)\n        num_levels = len(decoded_bboxes)\n        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]\n        bbox_targets = []\n        for i in range(num_levels):\n            bbox_target = self.bbox_coder.encode(mlvl_points[i],\n                                                 decoded_bboxes[i])\n            bbox_targets.append(bbox_target)\n\n        return bbox_targets\n\n    def _load_from_state_dict(self, state_dict: dict, prefix: str,\n                              local_metadata: dict, strict: bool,\n                              missing_keys: Union[List[str], str],\n                              unexpected_keys: Union[List[str], str],\n                              error_msgs: Union[List[str], str]) -> None:\n        \"\"\"Override the method in the parent class to avoid changing para's\n        name.\"\"\"\n        pass\n"
  },
  {
    "path": "mmdet/models/dense_heads/yolact_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Optional\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, OptMultiConfig)\nfrom ..layers import fast_nms\nfrom ..utils import images_to_levels, multi_apply, select_single_mlvl\nfrom ..utils.misc import empty_instances\nfrom .anchor_head import AnchorHead\nfrom .base_mask_head import BaseMaskHead\n\n\n@MODELS.register_module()\nclass YOLACTHead(AnchorHead):\n    \"\"\"YOLACT box head used in https://arxiv.org/abs/1904.02689.\n\n    Note that YOLACT head is a light version of RetinaNet head.\n    Four differences are described as follows:\n\n    1. YOLACT box head has three-times fewer anchors.\n    2. YOLACT box head shares the convs for box and cls branches.\n    3. YOLACT box head uses OHEM instead of Focal loss.\n    4. YOLACT box head predicts a set of mask coefficients for each box.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        anchor_generator (:obj:`ConfigDict` or dict): Config dict for\n            anchor generator\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.\n        num_head_convs (int): Number of the conv layers shared by\n            box and cls branches.\n        num_protos (int): Number of the mask coefficients.\n        use_ohem (bool): If true, ``loss_single_OHEM`` will be used for\n            cls loss calculation. 
If false, ``loss_single`` will be used.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to\n            construct and config conv layer.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to\n            construct and config norm layer.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: int,\n                 anchor_generator: ConfigType = dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=3,\n                     scales_per_octave=1,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     reduction='none',\n                     loss_weight=1.0),\n                 loss_bbox: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1.5),\n                 num_head_convs: int = 1,\n                 num_protos: int = 32,\n                 use_ohem: bool = True,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = dict(\n                     type='Xavier',\n                     distribution='uniform',\n                     bias=0,\n                     layer='Conv2d'),\n                 **kwargs) -> None:\n        self.num_head_convs = num_head_convs\n        self.num_protos = num_protos\n        self.use_ohem = use_ohem\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.head_convs = ModuleList()\n        for i in range(self.num_head_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.head_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.conv_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n        self.conv_coeff = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.num_protos,\n            3,\n            padding=1)\n\n    def forward_single(self, x: Tensor) -> tuple:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n\n            - cls_score (Tensor): Cls scores for a single scale level\n              the channels number is num_anchors * num_classes.\n            - bbox_pred (Tensor): Box energies / deltas for a single scale\n        
      level, the channels number is num_anchors * 4.\n            - coeff_pred (Tensor): Mask coefficients for a single scale\n              level, the channels number is num_anchors * num_protos.\n        \"\"\"\n        for head_conv in self.head_convs:\n            x = head_conv(x)\n        cls_score = self.conv_cls(x)\n        bbox_pred = self.conv_reg(x)\n        coeff_pred = self.conv_coeff(x).tanh()\n        return cls_score, bbox_pred, coeff_pred\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            coeff_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the bbox head.\n\n        When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,\n        otherwise, it follows ``AnchorHead.loss``.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            coeff_preds (list[Tensor]): Mask coefficients for each scale\n                level with shape (N, num_anchors * num_protos, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore,\n            unmap_outputs=not self.use_ohem,\n            return_sampling_results=True)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor, sampling_results) = cls_reg_targets\n\n        if self.use_ohem:\n            num_images = len(batch_img_metas)\n            all_cls_scores = torch.cat([\n                s.permute(0, 2, 3, 1).reshape(\n                    num_images, -1, self.cls_out_channels) for s in cls_scores\n            ], 1)\n            all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n            all_label_weights = torch.cat(label_weights_list,\n                                          -1).view(num_images, -1)\n            all_bbox_preds = torch.cat([\n                b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n                for b in bbox_preds\n            ], -2)\n            all_bbox_targets = torch.cat(bbox_targets_list,\n                                         -2).view(num_images, -1, 4)\n            all_bbox_weights = torch.cat(bbox_weights_list,\n                                         -2).view(num_images, -1, 4)\n\n            # concat all level anchors to a single tensor\n            all_anchors = []\n            for i in range(num_images):\n                all_anchors.append(torch.cat(anchor_list[i]))\n\n            # check NaN and Inf\n            assert torch.isfinite(all_cls_scores).all().item(), \\\n                'classification scores become infinite or NaN!'\n            assert torch.isfinite(all_bbox_preds).all().item(), \\\n                'bbox predications become infinite or NaN!'\n\n            losses_cls, losses_bbox = multi_apply(\n                self.OHEMloss_by_feat_single,\n                all_cls_scores,\n                all_bbox_preds,\n                all_anchors,\n                all_labels,\n                all_label_weights,\n                all_bbox_targets,\n                all_bbox_weights,\n                avg_factor=avg_factor)\n        else:\n            # anchor number of multi levels\n            num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n            # concat all level anchors and flags to a single tensor\n            concat_anchor_list = []\n            for i in range(len(anchor_list)):\n                concat_anchor_list.append(torch.cat(anchor_list[i]))\n            all_anchor_list = images_to_levels(concat_anchor_list,\n                                               num_level_anchors)\n            losses_cls, losses_bbox = multi_apply(\n                self.loss_by_feat_single,\n                cls_scores,\n                bbox_preds,\n                all_anchor_list,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                
bbox_weights_list,\n                avg_factor=avg_factor)\n        losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n        # update `_raw_positive_infos`, which will be used when calling\n        # `get_positive_infos`.\n        self._raw_positive_infos.update(coeff_preds=coeff_preds)\n        return losses\n\n    def OHEMloss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,\n                                anchors: Tensor, labels: Tensor,\n                                label_weights: Tensor, bbox_targets: Tensor,\n                                bbox_weights: Tensor,\n                                avg_factor: int) -> tuple:\n        \"\"\"Compute loss of a single image. Similar to\n        func:``SSDHead.loss_by_feat_single``\n\n        Args:\n            cls_score (Tensor): Box scores for each image.\n                Has shape (num_total_anchors, num_classes).\n            bbox_pred (Tensor): Box energies / deltas for each image\n                level with shape (num_total_anchors, 4).\n            anchors (Tensor): Box reference for each scale level with shape\n                (num_total_anchors, 4).\n            labels (Tensor): Labels of each anchor with shape\n                (num_total_anchors,).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (num_total_anchors,).\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                with shape (num_total_anchors, 4).\n            bbox_weights (Tensor): BBox regression loss weights of each anchor\n                with shape (num_total_anchors, 4).\n            avg_factor (int): Average factor that is used to average\n                the loss. When using sampling method, avg_factor is usually\n                the sum of positive and negative priors. When using\n                `PseudoSampler`, `avg_factor` is usually equal to the number\n                of positive priors.\n\n        Returns:\n            Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one\n            feature map.\n        \"\"\"\n\n        loss_cls_all = self.loss_cls(cls_score, labels, label_weights)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(\n            as_tuple=False).reshape(-1)\n        neg_inds = (labels == self.num_classes).nonzero(\n            as_tuple=False).view(-1)\n\n        num_pos_samples = pos_inds.size(0)\n        if num_pos_samples == 0:\n            num_neg_samples = neg_inds.size(0)\n        else:\n            num_neg_samples = self.train_cfg['neg_pos_ratio'] * \\\n                              num_pos_samples\n            if num_neg_samples > neg_inds.size(0):\n                num_neg_samples = neg_inds.size(0)\n        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)\n        loss_cls_pos = loss_cls_all[pos_inds].sum()\n        loss_cls_neg = topk_loss_cls_neg.sum()\n        loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n        loss_bbox = self.loss_bbox(\n            bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor)\n        return loss_cls[None], loss_bbox\n\n    def get_positive_infos(self) -> InstanceList:\n        \"\"\"Get positive information from sampling results.\n\n        Returns:\n            list[:obj:`InstanceData`]: Positive Information of each image,\n            usually including positive bboxes, positive labels, positive\n            priors, positive coeffs, etc.\n        \"\"\"\n        assert len(self._raw_positive_infos) > 0\n        sampling_results = self._raw_positive_infos['sampling_results']\n        num_imgs = len(sampling_results)\n\n        coeff_pred_list = []\n        for coeff_pred_per_level in self._raw_positive_infos['coeff_preds']:\n            coeff_pred_per_level = \\\n                coeff_pred_per_level.permute(\n                    0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos)\n            coeff_pred_list.append(coeff_pred_per_level)\n        coeff_preds = torch.cat(coeff_pred_list, dim=1)\n\n        pos_info_list = []\n        for idx, sampling_result in enumerate(sampling_results):\n            pos_info = InstanceData()\n            coeff_preds_single = coeff_preds[idx]\n            pos_info.pos_assigned_gt_inds = \\\n                sampling_result.pos_assigned_gt_inds\n            pos_info.pos_inds = sampling_result.pos_inds\n            pos_info.coeffs = coeff_preds_single[sampling_result.pos_inds]\n            pos_info.bboxes = sampling_result.pos_gt_bboxes\n            pos_info_list.append(pos_info)\n        return pos_info_list\n\n    def predict_by_feat(self,\n                        cls_scores,\n                        bbox_preds,\n                        coeff_preds,\n                        batch_img_metas,\n                        cfg=None,\n                        rescale=True,\n                        **kwargs):\n        \"\"\"Similar to func:``AnchorHead.get_bboxes``, but additionally\n        processes coeff_preds.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                with shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            coeff_preds (list[Tensor]): Mask coefficients for each scale\n                level with shape (N, num_anchors * num_protos, H, W)\n            batch_img_metas (list[dict]): Batch image meta info.\n            cfg (:obj:`Config` | None): Test / postprocessing configuration,\n                if None, test_cfg would be used\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. 
Each item usually contains following keys.\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - coeffs (Tensor): the predicted mask coefficients of\n                  instance inside the corresponding box has a shape\n                  (n, num_protos).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n        num_levels = len(cls_scores)\n\n        device = cls_scores[0].device\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            cls_score_list = select_single_mlvl(cls_scores, img_id)\n            bbox_pred_list = select_single_mlvl(bbox_preds, img_id)\n            coeff_pred_list = select_single_mlvl(coeff_preds, img_id)\n            results = self._predict_by_feat_single(\n                cls_score_list=cls_score_list,\n                bbox_pred_list=bbox_pred_list,\n                coeff_preds_list=coeff_pred_list,\n                mlvl_priors=mlvl_priors,\n                img_meta=img_meta,\n                cfg=cfg,\n                rescale=rescale)\n            result_list.append(results)\n        return result_list\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                coeff_preds_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigType,\n                                rescale: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results. 
Similar to func:``AnchorHead._predict_by_feat_single``,\n        but additionally processes coeff_preds_list and uses fast NMS instead\n        of traditional NMS.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores for a single scale level\n                with shape (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas for a single\n                scale level with shape (num_priors * 4, H, W).\n            coeff_preds_list (list[Tensor]): Mask coefficients for a single\n                scale level with shape (num_priors * num_protos, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid,\n                has shape (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - coeffs (Tensor): the predicted mask coefficients of\n                  instance inside the corresponding box has a shape\n                  (n, num_protos).\n        \"\"\"\n        assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_priors)\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_valid_priors = []\n        mlvl_scores = []\n        mlvl_coeffs = []\n        for cls_score, bbox_pred, coeff_pred, priors in \\\n                zip(cls_score_list, bbox_pred_list,\n                    coeff_preds_list, mlvl_priors):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            coeff_pred = coeff_pred.permute(1, 2,\n                                            0).reshape(-1, self.num_protos)\n\n            if 0 < nms_pre < scores.shape[0]:\n                # Get maximum scores for foreground classes.\n                if self.use_sigmoid_cls:\n                    max_scores, _ = scores.max(dim=1)\n                else:\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    max_scores, _ = scores[:, :-1].max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                priors = priors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n                
coeff_pred = coeff_pred[topk_inds, :]\n\n            mlvl_bbox_preds.append(bbox_pred)\n            mlvl_valid_priors.append(priors)\n            mlvl_scores.append(scores)\n            mlvl_coeffs.append(coeff_pred)\n\n        bbox_pred = torch.cat(mlvl_bbox_preds)\n        priors = torch.cat(mlvl_valid_priors)\n        multi_bboxes = self.bbox_coder.decode(\n            priors, bbox_pred, max_shape=img_shape)\n\n        multi_scores = torch.cat(mlvl_scores)\n        multi_coeffs = torch.cat(mlvl_coeffs)\n\n        return self._bbox_post_process(\n            multi_bboxes=multi_bboxes,\n            multi_scores=multi_scores,\n            multi_coeffs=multi_coeffs,\n            cfg=cfg,\n            rescale=rescale,\n            img_meta=img_meta)\n\n    def _bbox_post_process(self,\n                           multi_bboxes: Tensor,\n                           multi_scores: Tensor,\n                           multi_coeffs: Tensor,\n                           cfg: ConfigType,\n                           rescale: bool = False,\n                           img_meta: Optional[dict] = None,\n                           **kwargs) -> InstanceData:\n        \"\"\"bbox post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation. Usually `with_nms` is False is used for aug test.\n\n        Args:\n            multi_bboxes (Tensor): Predicted bbox that concat all levels.\n            multi_scores (Tensor): Bbox scores that concat all levels.\n            multi_coeffs (Tensor): Mask coefficients  that concat all levels.\n            cfg (ConfigDict): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default to False.\n            img_meta (dict, optional): Image meta info. 
Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - coeffs (Tensor): the predicted mask coefficients of\n                  instance inside the corresponding box has a shape\n                  (n, num_protos).\n        \"\"\"\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            multi_bboxes /= multi_bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n            # mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n\n        if self.use_sigmoid_cls:\n            # Add a dummy background class to the backend when using sigmoid\n            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n            # BG cat_id: num_class\n\n            padding = multi_scores.new_zeros(multi_scores.shape[0], 1)\n            multi_scores = torch.cat([multi_scores, padding], dim=1)\n        det_bboxes, det_labels, det_coeffs = fast_nms(\n            multi_bboxes, multi_scores, multi_coeffs, cfg.score_thr,\n            cfg.iou_thr, cfg.top_k, cfg.max_per_img)\n        results = InstanceData()\n        results.bboxes = det_bboxes[:, :4]\n        results.scores = det_bboxes[:, -1]\n        results.labels = det_labels\n        results.coeffs = det_coeffs\n        return results\n\n\n@MODELS.register_module()\nclass YOLACTProtonet(BaseMaskHead):\n    \"\"\"YOLACT mask head used in https://arxiv.org/abs/1904.02689.\n\n    This head outputs the mask prototypes for YOLACT.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        proto_channels (tuple[int]): Output channels of protonet convs.\n        proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.\n        include_last_relu (bool): If keep the last relu of protonet.\n        num_protos (int): Number of prototypes.\n        num_classes (int): Number of categories excluding the background\n            category.\n        loss_mask_weight (float): Reweight the mask loss by this factor.\n        max_masks_to_train (int): Maximum number of masks to train for\n            each image.\n        with_seg_branch (bool): Whether to apply a semantic segmentation\n            branch and calculate loss during training to increase\n            performance with no speed penalty. 
Defaults to True.\n        loss_segm (:obj:`ConfigDict` or dict, optional): Config of\n            semantic segmentation loss.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config\n            of head.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            head.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int = 256,\n        proto_channels: tuple = (256, 256, 256, None, 256, 32),\n        proto_kernel_sizes: tuple = (3, 3, 3, -2, 3, 1),\n        include_last_relu: bool = True,\n        num_protos: int = 32,\n        loss_mask_weight: float = 1.0,\n        max_masks_to_train: int = 100,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        with_seg_branch: bool = True,\n        loss_segm: ConfigType = dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        init_cfg=dict(\n            type='Xavier',\n            distribution='uniform',\n            override=dict(name='protonet'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.proto_channels = proto_channels\n        self.proto_kernel_sizes = proto_kernel_sizes\n        self.include_last_relu = include_last_relu\n\n        # Segmentation branch\n        self.with_seg_branch = with_seg_branch\n        self.segm_branch = SegmentationModule(\n            num_classes=num_classes, in_channels=in_channels) \\\n            if with_seg_branch else None\n        self.loss_segm = MODELS.build(loss_segm) if with_seg_branch else None\n\n        self.loss_mask_weight = loss_mask_weight\n        self.num_protos = num_protos\n        self.num_classes = num_classes\n        self.max_masks_to_train = max_masks_to_train\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        # Possible patterns:\n        # ( 256, 3) -> conv\n        # ( 256,-2) -> deconv\n        # (None,-2) -> bilinear interpolate\n        in_channels = self.in_channels\n        protonets = ModuleList()\n        for num_channels, kernel_size in zip(self.proto_channels,\n                                             self.proto_kernel_sizes):\n            if kernel_size > 0:\n                layer = nn.Conv2d(\n                    in_channels,\n                    num_channels,\n                    kernel_size,\n                    padding=kernel_size // 2)\n            else:\n                if num_channels is None:\n                    layer = InterpolateModule(\n                        scale_factor=-kernel_size,\n                        mode='bilinear',\n                        align_corners=False)\n                else:\n                    layer = nn.ConvTranspose2d(\n                        in_channels,\n                        num_channels,\n                        -kernel_size,\n                        padding=kernel_size // 2)\n            protonets.append(layer)\n            protonets.append(nn.ReLU(inplace=True))\n            in_channels = num_channels if num_channels is not None \\\n                else in_channels\n        if not self.include_last_relu:\n            protonets = protonets[:-1]\n        self.protonet = nn.Sequential(*protonets)\n\n    def 
forward(self, x: tuple, positive_infos: InstanceList) -> tuple:\n        \"\"\"Forward feature from the upstream network to get prototypes and\n        linearly combine the prototypes, using masks coefficients, into\n        instance masks. Finally, crop the instance masks with given bboxes.\n\n        Args:\n            x (Tuple[Tensor]): Feature from the upstream network, which is\n                a 4D-tensor.\n            positive_infos (List[:obj:``InstanceData``]): Positive information\n                that calculate from detect head.\n\n        Returns:\n            tuple: Predicted instance segmentation masks and\n            semantic segmentation map.\n        \"\"\"\n        # YOLACT used single feature map to get segmentation masks\n        single_x = x[0]\n\n        # YOLACT segmentation branch, if not training or segmentation branch\n        # is None, will not process the forward function.\n        if self.segm_branch is not None and self.training:\n            segm_preds = self.segm_branch(single_x)\n        else:\n            segm_preds = None\n        # YOLACT mask head\n        prototypes = self.protonet(single_x)\n        prototypes = prototypes.permute(0, 2, 3, 1).contiguous()\n\n        num_imgs = single_x.size(0)\n\n        mask_pred_list = []\n        for idx in range(num_imgs):\n            cur_prototypes = prototypes[idx]\n            pos_coeffs = positive_infos[idx].coeffs\n\n            # Linearly combine the prototypes with the mask coefficients\n            mask_preds = cur_prototypes @ pos_coeffs.t()\n            mask_preds = torch.sigmoid(mask_preds)\n            mask_pred_list.append(mask_preds)\n        return mask_pred_list, segm_preds\n\n    def loss_by_feat(self, mask_preds: List[Tensor], segm_preds: List[Tensor],\n                     batch_gt_instances: InstanceList,\n                     batch_img_metas: List[dict], positive_infos: InstanceList,\n                     **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mask_preds (list[Tensor]): List of predicted prototypes, each has\n                shape (num_classes, H, W).\n            segm_preds (Tensor):  Predicted semantic segmentation map with\n                shape (N, num_classes, H, W)\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``masks``,\n                and ``labels`` attributes.\n            batch_img_metas (list[dict]): Meta information of multiple images.\n            positive_infos (List[:obj:``InstanceData``]): Information of\n                positive samples of each image that are assigned in detection\n                head.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert positive_infos is not None, \\\n            'positive_infos should not be None in `YOLACTProtonet`'\n        losses = dict()\n\n        # crop\n        croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas,\n                                                positive_infos)\n\n        loss_mask = []\n        loss_segm = []\n        num_imgs, _, mask_h, mask_w = segm_preds.size()\n        assert num_imgs == len(croped_mask_pred)\n        segm_avg_factor = num_imgs * mask_h * mask_w\n        total_pos = 0\n\n        if self.segm_branch is not None:\n            assert segm_preds is not None\n\n        for idx in range(num_imgs):\n            img_meta = batch_img_metas[idx]\n\n            (mask_preds, pos_mask_targets, segm_targets, num_pos,\n             gt_bboxes_for_reweight) = self._get_targets_single(\n                 croped_mask_pred[idx], segm_preds[idx],\n                 batch_gt_instances[idx], positive_infos[idx])\n\n            # segmentation loss\n            if self.with_seg_branch:\n                if segm_targets is None:\n                    loss = segm_preds[idx].sum() * 0.\n                else:\n                    loss = self.loss_segm(\n                        segm_preds[idx],\n                        segm_targets,\n                        avg_factor=segm_avg_factor)\n                loss_segm.append(loss)\n            # mask loss\n            total_pos += num_pos\n            if num_pos == 0 or pos_mask_targets is None:\n                loss = mask_preds.sum() * 0.\n            else:\n                mask_preds = torch.clamp(mask_preds, 0, 1)\n                loss = F.binary_cross_entropy(\n                    mask_preds, pos_mask_targets,\n                    reduction='none') * self.loss_mask_weight\n\n                h, w = img_meta['img_shape'][:2]\n                gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -\n                                   gt_bboxes_for_reweight[:, 0]) / w\n                gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -\n                                    gt_bboxes_for_reweight[:, 1]) / h\n                loss = loss.mean(dim=(1,\n                                      2)) / gt_bboxes_width / gt_bboxes_height\n                loss = torch.sum(loss)\n            loss_mask.append(loss)\n\n        if total_pos == 0:\n            total_pos += 1  # avoid nan\n        loss_mask = [x / total_pos for x in loss_mask]\n\n        losses.update(loss_mask=loss_mask)\n        if self.with_seg_branch:\n            losses.update(loss_segm=loss_segm)\n\n        return losses\n\n    def _get_targets_single(self, mask_preds: Tensor, segm_pred: Tensor,\n                            gt_instances: InstanceData,\n                            positive_info: InstanceData):\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            mask_preds (Tensor): Predicted prototypes with shape\n                (num_classes, H, W).\n            segm_pred (Tensor): Predicted semantic segmentation map\n                with shape (num_classes, H, W).\n            gt_instances 
(:obj:`InstanceData`): Ground truth of instance\n                annotations. It should includes ``bboxes``, ``labels``,\n                and ``masks`` attributes.\n            positive_info (:obj:`InstanceData`): Information of positive\n                samples that are assigned in detection head. It usually\n                contains following keys.\n\n                    - pos_assigned_gt_inds (Tensor): Assigner GT indexes of\n                      positive proposals, has shape (num_pos, )\n                    - pos_inds (Tensor): Positive index of image, has\n                      shape (num_pos, ).\n                    - coeffs (Tensor): Positive mask coefficients\n                      with shape (num_pos, num_protos).\n                    - bboxes (Tensor): Positive bboxes with shape\n                      (num_pos, 4)\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n            - mask_preds (Tensor): Positive predicted mask with shape\n              (num_pos, mask_h, mask_w).\n            - pos_mask_targets (Tensor): Positive mask targets with shape\n              (num_pos, mask_h, mask_w).\n            - segm_targets (Tensor): Semantic segmentation targets with shape\n              (num_classes, segm_h, segm_w).\n            - num_pos (int): Positive numbers.\n            - gt_bboxes_for_reweight (Tensor): GT bboxes that match to the\n              positive priors has shape (num_pos, 4).\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        device = gt_bboxes.device\n        gt_masks = gt_instances.masks.to_tensor(\n            dtype=torch.bool, device=device).float()\n        if gt_masks.size(0) == 0:\n            return mask_preds, None, None, 0, None\n\n        # process with semantic segmentation targets\n        if segm_pred is not None:\n            num_classes, segm_h, segm_w = segm_pred.size()\n            with torch.no_grad():\n                downsampled_masks = F.interpolate(\n                    gt_masks.unsqueeze(0), (segm_h, segm_w),\n                    mode='bilinear',\n                    align_corners=False).squeeze(0)\n                downsampled_masks = downsampled_masks.gt(0.5).float()\n                segm_targets = torch.zeros_like(segm_pred, requires_grad=False)\n                for obj_idx in range(downsampled_masks.size(0)):\n                    segm_targets[gt_labels[obj_idx] - 1] = torch.max(\n                        segm_targets[gt_labels[obj_idx] - 1],\n                        downsampled_masks[obj_idx])\n        else:\n            segm_targets = None\n        # process with mask targets\n        pos_assigned_gt_inds = positive_info.pos_assigned_gt_inds\n        num_pos = pos_assigned_gt_inds.size(0)\n        # Since we're producing (near) full image masks,\n        # it'd take too much vram to backprop on every single mask.\n        # Thus we select only a subset.\n        if num_pos > self.max_masks_to_train:\n            perm = torch.randperm(num_pos)\n            select = perm[:self.max_masks_to_train]\n            mask_preds = mask_preds[select]\n            pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n            num_pos = self.max_masks_to_train\n\n        gt_bboxes_for_reweight = gt_bboxes[pos_assigned_gt_inds]\n\n        mask_h, mask_w = mask_preds.shape[-2:]\n        gt_masks = F.interpolate(\n            gt_masks.unsqueeze(0), (mask_h, mask_w),\n            mode='bilinear',\n            align_corners=False).squeeze(0)\n        gt_masks 
= gt_masks.gt(0.5).float()\n        pos_mask_targets = gt_masks[pos_assigned_gt_inds]\n\n        return (mask_preds, pos_mask_targets, segm_targets, num_pos,\n                gt_bboxes_for_reweight)\n\n    def crop_mask_preds(self, mask_preds: List[Tensor],\n                        batch_img_metas: List[dict],\n                        positive_infos: InstanceList) -> list:\n        \"\"\"Crop predicted masks by zeroing out everything not in the predicted\n        bbox.\n\n        Args:\n            mask_preds (list[Tensor]): Predicted prototypes with shape\n                (num_classes, H, W).\n            batch_img_metas (list[dict]): Meta information of multiple images.\n            positive_infos (List[:obj:``InstanceData``]): Positive\n                information that calculate from detect head.\n\n        Returns:\n            list: The cropped masks.\n        \"\"\"\n        croped_mask_preds = []\n        for img_meta, mask_preds, cur_info in zip(batch_img_metas, mask_preds,\n                                                  positive_infos):\n            bboxes_for_cropping = copy.deepcopy(cur_info.bboxes)\n            h, w = img_meta['img_shape'][:2]\n            bboxes_for_cropping[:, 0::2] /= w\n            bboxes_for_cropping[:, 1::2] /= h\n            mask_preds = self.crop_single(mask_preds, bboxes_for_cropping)\n            mask_preds = mask_preds.permute(2, 0, 1).contiguous()\n            croped_mask_preds.append(mask_preds)\n        return croped_mask_preds\n\n    def crop_single(self,\n                    masks: Tensor,\n                    boxes: Tensor,\n                    padding: int = 1) -> Tensor:\n        \"\"\"Crop single predicted masks by zeroing out everything not in the\n        predicted bbox.\n\n        Args:\n            masks (Tensor): Predicted prototypes, has shape [H, W, N].\n            boxes (Tensor): Bbox coords in relative point form with\n                shape [N, 4].\n            padding (int): Image padding size.\n\n        Return:\n            Tensor: The cropped masks.\n        \"\"\"\n        h, w, n = masks.size()\n        x1, x2 = self.sanitize_coordinates(\n            boxes[:, 0], boxes[:, 2], w, padding, cast=False)\n        y1, y2 = self.sanitize_coordinates(\n            boxes[:, 1], boxes[:, 3], h, padding, cast=False)\n\n        rows = torch.arange(\n            w, device=masks.device, dtype=x1.dtype).view(1, -1,\n                                                         1).expand(h, w, n)\n        cols = torch.arange(\n            h, device=masks.device, dtype=x1.dtype).view(-1, 1,\n                                                         1).expand(h, w, n)\n\n        masks_left = rows >= x1.view(1, 1, -1)\n        masks_right = rows < x2.view(1, 1, -1)\n        masks_up = cols >= y1.view(1, 1, -1)\n        masks_down = cols < y2.view(1, 1, -1)\n\n        crop_mask = masks_left * masks_right * masks_up * masks_down\n\n        return masks * crop_mask.float()\n\n    def sanitize_coordinates(self,\n                             x1: Tensor,\n                             x2: Tensor,\n                             img_size: int,\n                             padding: int = 0,\n                             cast: bool = True) -> tuple:\n        \"\"\"Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,\n        and x2 <= image_size. 
Also converts from relative to absolute\n        coordinates and casts the results to long tensors.\n\n        Warning: this does things in-place behind the scenes so\n        copy if necessary.\n\n        Args:\n            x1 (Tensor): shape (N, ).\n            x2 (Tensor): shape (N, ).\n            img_size (int): Size of the input image.\n            padding (int): x1 >= padding, x2 <= image_size-padding.\n            cast (bool): If cast is false, the result won't be cast to longs.\n\n        Returns:\n            tuple:\n\n            - x1 (Tensor): Sanitized _x1.\n            - x2 (Tensor): Sanitized _x2.\n        \"\"\"\n        x1 = x1 * img_size\n        x2 = x2 * img_size\n        if cast:\n            x1 = x1.long()\n            x2 = x2.long()\n        x1 = torch.min(x1, x2)\n        x2 = torch.max(x1, x2)\n        x1 = torch.clamp(x1 - padding, min=0)\n        x2 = torch.clamp(x2 + padding, max=img_size)\n        return x1, x2\n\n    def predict_by_feat(self,\n                        mask_preds: List[Tensor],\n                        segm_preds: Tensor,\n                        results_list: InstanceList,\n                        batch_img_metas: List[dict],\n                        rescale: bool = True,\n                        **kwargs) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mask_preds (list[Tensor]): Predicted prototypes with shape\n                (num_classes, H, W).\n            results_list (List[:obj:``InstanceData``]): BBoxHead results.\n            batch_img_metas (list[dict]): Meta information of all images.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        assert len(mask_preds) == len(results_list) == len(batch_img_metas)\n\n        croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas,\n                                                results_list)\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            results = results_list[img_id]\n            bboxes = results.bboxes\n            mask_preds = croped_mask_pred[img_id]\n            if bboxes.shape[0] == 0 or mask_preds.shape[0] == 0:\n                results_list[img_id] = empty_instances(\n                    [img_meta],\n                    bboxes.device,\n                    task_type='mask',\n                    instance_results=[results])[0]\n            else:\n                im_mask = self._predict_by_feat_single(\n                    mask_preds=croped_mask_pred[img_id],\n                    bboxes=bboxes,\n                    img_meta=img_meta,\n                    rescale=rescale)\n                results.masks = im_mask\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                mask_preds: Tensor,\n                                bboxes: Tensor,\n                                img_meta: dict,\n                                rescale: bool,\n   
                             cfg: OptConfigType = None):\n        \"\"\"Transform a single image's features extracted from the head into\n        mask results.\n\n        Args:\n            mask_preds (Tensor): Predicted prototypes, has shape [H, W, N].\n            bboxes (Tensor): Bbox coords in relative point form with\n                shape [N, 4].\n            img_meta (dict): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If rescale is False, then returned masks will\n                fit the scale of imgs[0].\n            cfg (dict, optional): Config used in test phase.\n                Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n            (1, 2))\n        img_h, img_w = img_meta['ori_shape'][:2]\n        if rescale:  # in-placed rescale the bboxes\n            scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n            bboxes /= scale_factor\n        else:\n            w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1]\n            img_h = np.round(img_h * h_scale.item()).astype(np.int32)\n            img_w = np.round(img_w * w_scale.item()).astype(np.int32)\n\n        masks = F.interpolate(\n            mask_preds.unsqueeze(0), (img_h, img_w),\n            mode='bilinear',\n            align_corners=False).squeeze(0) > cfg.mask_thr\n\n        if cfg.mask_thr_binary < 0:\n            # for visualization and debugging\n            masks = (masks * 255).to(dtype=torch.uint8)\n\n        return masks\n\n\nclass SegmentationModule(BaseModule):\n    \"\"\"YOLACT segmentation branch used in <https://arxiv.org/abs/1904.02689>`_\n\n    In mmdet v2.x `segm_loss` is calculated in YOLACTSegmHead, while in\n    mmdet v3.x `SegmentationModule` is used to obtain the predicted semantic\n    segmentation map and `segm_loss` is calculated in YOLACTProtonet.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int = 256,\n        init_cfg: ConfigType = dict(\n            type='Xavier',\n            distribution='uniform',\n            override=dict(name='segm_conv'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.segm_conv = nn.Conv2d(\n            self.in_channels, self.num_classes, kernel_size=1)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward feature from the upstream network.\n\n        Args:\n            x (Tensor): Feature from the upstream network, which is\n                a 4D-tensor.\n\n        
Returns:\n            Tensor: Predicted semantic segmentation map with shape\n                (N, num_classes, H, W).\n        \"\"\"\n        return self.segm_conv(x)\n\n\nclass InterpolateModule(BaseModule):\n    \"\"\"This is a module version of F.interpolate.\n\n    Any arguments you give it just get passed along for the ride.\n    \"\"\"\n\n    def __init__(self, *args, init_cfg=None, **kwargs) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.args = args\n        self.kwargs = kwargs\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (Tensor): Feature from the upstream network, which is\n                a 4D-tensor.\n\n        Returns:\n            Tensor: A 4D-tensor feature map.\n        \"\"\"\n        return F.interpolate(x, *self.args, **self.kwargs)\n"
  },
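A short illustration of the mask-assembly step that `YOLACTProtonet.forward` in the file above documents: prototypes produced by the protonet are linearly combined with the per-instance coefficients kept by the bbox head's fast NMS, then squashed with a sigmoid. This is a minimal, standalone sketch with illustrative shapes and a hypothetical helper name; it is not part of the mmdetection API.

    # Sketch only: mirrors the `cur_prototypes @ pos_coeffs.t()` line in
    # YOLACTProtonet.forward(); shapes and names are illustrative.
    import torch

    def combine_prototypes(prototypes: torch.Tensor,
                           coeffs: torch.Tensor) -> torch.Tensor:
        """prototypes: (H, W, num_protos); coeffs: (num_pos, num_protos).

        Returns per-instance masks of shape (H, W, num_pos).
        """
        masks = prototypes @ coeffs.t()   # linear combination per instance
        return masks.sigmoid()

    # toy example: one image, 32 prototypes, 5 detections kept after fast NMS
    protos = torch.randn(34, 34, 32)
    coeffs = torch.randn(5, 32)
    masks = combine_prototypes(protos, coeffs)
    assert masks.shape == (34, 34, 5)

In the real head these masks are then cropped to the (relative-coordinate) detection boxes via `crop_mask_preds` before the mask loss or final resizing is applied.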
  {
    "path": "mmdet/models/dense_heads/yolo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimport copy\nimport warnings\nfrom typing import List, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, is_norm\nfrom mmengine.model import bias_init_with_prob, constant_init, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList)\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import filter_scores_and_topk, images_to_levels, multi_apply\nfrom .base_dense_head import BaseDenseHead\n\n\n@MODELS.register_module()\nclass YOLOV3Head(BaseDenseHead):\n    \"\"\"YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.\n\n    Args:\n        num_classes (int): The number of object classes (w/o background)\n        in_channels (Sequence[int]): Number of input channels per scale.\n        out_channels (Sequence[int]): The number of output channels per scale\n            before the final 1x1 layer. Default: (1024, 512, 256).\n        anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor\n            generator.\n        bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder.\n        featmap_strides (Sequence[int]): The stride of each scale.\n            Should be in descending order. Defaults to (32, 16, 8).\n        one_hot_smoother (float): Set a non-zero value to enable label-smooth\n            Defaults to 0.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and\n            config norm layer. Defaults to dict(type='BN', requires_grad=True).\n        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.\n            Defaults to dict(type='LeakyReLU', negative_slope=0.1).\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_conf (:obj:`ConfigDict` or dict): Config of confidence loss.\n        loss_xy (:obj:`ConfigDict` or dict): Config of xy coordinate loss.\n        loss_wh (:obj:`ConfigDict` or dict): Config of wh coordinate loss.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            YOLOV3 head. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            YOLOV3 head. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: Sequence[int],\n                 out_channels: Sequence[int] = (1024, 512, 256),\n                 anchor_generator: ConfigType = dict(\n                     type='YOLOAnchorGenerator',\n                     base_sizes=[[(116, 90), (156, 198), (373, 326)],\n                                 [(30, 61), (62, 45), (59, 119)],\n                                 [(10, 13), (16, 30), (33, 23)]],\n                     strides=[32, 16, 8]),\n                 bbox_coder: ConfigType = dict(type='YOLOBBoxCoder'),\n                 featmap_strides: Sequence[int] = (32, 16, 8),\n                 one_hot_smoother: float = 0.,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 act_cfg: ConfigType = dict(\n                     type='LeakyReLU', negative_slope=0.1),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_conf: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_xy: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_wh: ConfigType = dict(type='MSELoss', loss_weight=1.0),\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None) -> None:\n        super().__init__(init_cfg=None)\n        # Check params\n        assert (len(in_channels) == len(out_channels) == len(featmap_strides))\n\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.featmap_strides = featmap_strides\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            if train_cfg.get('sampler', None) is not None:\n                self.sampler = TASK_UTILS.build(\n                    self.train_cfg['sampler'], context=self)\n            else:\n                self.sampler = PseudoSampler()\n\n        self.one_hot_smoother = one_hot_smoother\n\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n\n        self.prior_generator = TASK_UTILS.build(anchor_generator)\n\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_conf = MODELS.build(loss_conf)\n        self.loss_xy = MODELS.build(loss_xy)\n        self.loss_wh = MODELS.build(loss_wh)\n\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n        assert len(\n            self.prior_generator.num_base_priors) == len(featmap_strides)\n        self._init_layers()\n\n    @property\n    def num_levels(self) -> int:\n        \"\"\"int: number of feature map levels\"\"\"\n        return len(self.featmap_strides)\n\n    @property\n    def num_attrib(self) -> int:\n        \"\"\"int: number of attributes in pred_map, bboxes (4) +\n        objectness (1) + num_classes\"\"\"\n\n        return 5 + self.num_classes\n\n    def _init_layers(self) -> None:\n        \"\"\"initialize conv layers in YOLOv3 head.\"\"\"\n        
self.convs_bridge = nn.ModuleList()\n        self.convs_pred = nn.ModuleList()\n        for i in range(self.num_levels):\n            conv_bridge = ConvModule(\n                self.in_channels[i],\n                self.out_channels[i],\n                3,\n                padding=1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                act_cfg=self.act_cfg)\n            conv_pred = nn.Conv2d(self.out_channels[i],\n                                  self.num_base_priors * self.num_attrib, 1)\n\n            self.convs_bridge.append(conv_bridge)\n            self.convs_pred.append(conv_pred)\n\n    def init_weights(self) -> None:\n        \"\"\"initialize weights.\"\"\"\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n        # Use prior in model initialization to improve stability\n        for conv_pred, stride in zip(self.convs_pred, self.featmap_strides):\n            bias = conv_pred.bias.reshape(self.num_base_priors, -1)\n            # init objectness with prior of 8 objects per feature map\n            # refer to https://github.com/ultralytics/yolov3\n            nn.init.constant_(bias.data[:, 4],\n                              bias_init_with_prob(8 / (608 / stride)**2))\n            nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01))\n\n    def forward(self, x: Tuple[Tensor, ...]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple[Tensor]: A tuple of multi-level predication map, each is a\n                4D-tensor of shape (batch_size, 5+num_classes, height, width).\n        \"\"\"\n\n        assert len(x) == self.num_levels\n        pred_maps = []\n        for i in range(self.num_levels):\n            feat = x[i]\n            feat = self.convs_bridge[i](feat)\n            pred_map = self.convs_pred[i](feat)\n            pred_maps.append(pred_map)\n\n        return tuple(pred_maps),\n\n    def predict_by_feat(self,\n                        pred_maps: Sequence[Tensor],\n                        batch_img_metas: Optional[List[dict]],\n                        cfg: OptConfigType = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results. It has been accelerated since PR #5991.\n\n        Args:\n            pred_maps (Sequence[Tensor]): Raw predictions for a batch of\n                images.\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. 
Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(pred_maps) == self.num_levels\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps]\n\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=pred_maps[0].device)\n        flatten_preds = []\n        flatten_strides = []\n        for pred, stride in zip(pred_maps, self.featmap_strides):\n            pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                    self.num_attrib)\n            pred[..., :2].sigmoid_()\n            flatten_preds.append(pred)\n            flatten_strides.append(\n                pred.new_tensor(stride).expand(pred.size(1)))\n\n        flatten_preds = torch.cat(flatten_preds, dim=1)\n        flatten_bbox_preds = flatten_preds[..., :4]\n        flatten_objectness = flatten_preds[..., 4].sigmoid()\n        flatten_cls_scores = flatten_preds[..., 5:].sigmoid()\n        flatten_anchors = torch.cat(mlvl_anchors)\n        flatten_strides = torch.cat(flatten_strides)\n        flatten_bboxes = self.bbox_coder.decode(flatten_anchors,\n                                                flatten_bbox_preds,\n                                                flatten_strides.unsqueeze(-1))\n        results_list = []\n        for (bboxes, scores, objectness,\n             img_meta) in zip(flatten_bboxes, flatten_cls_scores,\n                              flatten_objectness, batch_img_metas):\n            # Filtering out all predictions with conf < conf_thr\n            conf_thr = cfg.get('conf_thr', -1)\n            if conf_thr > 0:\n                conf_inds = objectness >= conf_thr\n                bboxes = bboxes[conf_inds, :]\n                scores = scores[conf_inds, :]\n                objectness = objectness[conf_inds]\n\n            score_thr = cfg.get('score_thr', 0)\n            nms_pre = cfg.get('nms_pre', -1)\n            scores, labels, keep_idxs, _ = filter_scores_and_topk(\n                scores, score_thr, nms_pre)\n\n            results = InstanceData(\n                scores=scores,\n                labels=labels,\n                bboxes=bboxes[keep_idxs],\n                score_factors=objectness[keep_idxs],\n            )\n            results = self._bbox_post_process(\n                results=results,\n                cfg=cfg,\n                rescale=rescale,\n                with_nms=with_nms,\n                img_meta=img_meta)\n            results_list.append(results)\n        return results_list\n\n    def loss_by_feat(\n            self,\n            pred_maps: Sequence[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            pred_maps (list[Tensor]): Prediction map for each scale level,\n                shape (N, num_anchors * num_attrib, H, W)\n            batch_gt_instances 
(list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        device = pred_maps[0][0].device\n\n        featmap_sizes = [\n            pred_maps[i].shape[-2:] for i in range(self.num_levels)\n        ]\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [mlvl_anchors for _ in range(num_imgs)]\n\n        responsible_flag_list = []\n        for img_id in range(num_imgs):\n            responsible_flag_list.append(\n                self.responsible_flags(featmap_sizes,\n                                       batch_gt_instances[img_id].bboxes,\n                                       device))\n\n        target_maps_list, neg_maps_list = self.get_targets(\n            anchor_list, responsible_flag_list, batch_gt_instances)\n\n        losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(\n            self.loss_by_feat_single, pred_maps, target_maps_list,\n            neg_maps_list)\n\n        return dict(\n            loss_cls=losses_cls,\n            loss_conf=losses_conf,\n            loss_xy=losses_xy,\n            loss_wh=losses_wh)\n\n    def loss_by_feat_single(self, pred_map: Tensor, target_map: Tensor,\n                            neg_map: Tensor) -> tuple:\n        \"\"\"Calculate the loss of a single scale level based on the features\n        extracted by the detection head.\n\n        Args:\n            pred_map (Tensor): Raw predictions for a single level.\n            target_map (Tensor): The Ground-Truth target for a single level.\n            neg_map (Tensor): The negative masks for a single level.\n\n        Returns:\n            tuple:\n                loss_cls (Tensor): Classification loss.\n                loss_conf (Tensor): Confidence loss.\n                loss_xy (Tensor): Regression loss of x, y coordinate.\n                loss_wh (Tensor): Regression loss of w, h coordinate.\n        \"\"\"\n\n        num_imgs = len(pred_map)\n        pred_map = pred_map.permute(0, 2, 3,\n                                    1).reshape(num_imgs, -1, self.num_attrib)\n        neg_mask = neg_map.float()\n        pos_mask = target_map[..., 4]\n        pos_and_neg_mask = neg_mask + pos_mask\n        pos_mask = pos_mask.unsqueeze(dim=-1)\n        if torch.max(pos_and_neg_mask) > 1.:\n            warnings.warn('There is overlap between pos and neg sample.')\n            pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)\n\n        pred_xy = pred_map[..., :2]\n        pred_wh = pred_map[..., 2:4]\n        pred_conf = pred_map[..., 4]\n        pred_label = pred_map[..., 5:]\n\n        target_xy = target_map[..., :2]\n        target_wh = target_map[..., 2:4]\n        target_conf = target_map[..., 4]\n        target_label = target_map[..., 5:]\n\n        loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)\n        loss_conf = self.loss_conf(\n            pred_conf, target_conf, 
weight=pos_and_neg_mask)\n        loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)\n        loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)\n\n        return loss_cls, loss_conf, loss_xy, loss_wh\n\n    def get_targets(self, anchor_list: List[List[Tensor]],\n                    responsible_flag_list: List[List[Tensor]],\n                    batch_gt_instances: List[InstanceData]) -> tuple:\n        \"\"\"Compute target maps for anchors in multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_total_anchors, 4).\n            responsible_flag_list (list[list[Tensor]]): Multi level responsible\n                flags of each image. Each element is a tensor of shape\n                (num_total_anchors, )\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n                - target_map_list (list[Tensor]): Target map of each level.\n                - neg_map_list (list[Tensor]): Negative map of each level.\n        \"\"\"\n        num_imgs = len(anchor_list)\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        results = multi_apply(self._get_targets_single, anchor_list,\n                              responsible_flag_list, batch_gt_instances)\n\n        all_target_maps, all_neg_maps = results\n        assert num_imgs == len(all_target_maps) == len(all_neg_maps)\n        target_maps_list = images_to_levels(all_target_maps, num_level_anchors)\n        neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)\n\n        return target_maps_list, neg_maps_list\n\n    def _get_targets_single(self, anchors: List[Tensor],\n                            responsible_flags: List[Tensor],\n                            gt_instances: InstanceData) -> tuple:\n        \"\"\"Generate matching bounding box prior and converted GT.\n\n        Args:\n            anchors (List[Tensor]): Multi-level anchors of the image.\n            responsible_flags (List[Tensor]): Multi-level responsible flags of\n                anchors\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It should includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            tuple:\n                target_map (Tensor): Predication target map of each\n                    scale level, shape (num_total_anchors,\n                    5+num_classes)\n                neg_map (Tensor): Negative map of each scale level,\n                    shape (num_total_anchors,)\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        anchor_strides = []\n        for i in range(len(anchors)):\n            anchor_strides.append(\n                torch.tensor(self.featmap_strides[i],\n                             device=gt_bboxes.device).repeat(len(anchors[i])))\n        concat_anchors = torch.cat(anchors)\n        concat_responsible_flags = torch.cat(responsible_flags)\n\n        anchor_strides = torch.cat(anchor_strides)\n        assert len(anchor_strides) == len(concat_anchors) == \\\n               len(concat_responsible_flags)\n        pred_instances = InstanceData(\n            priors=concat_anchors, responsible_flags=concat_responsible_flags)\n\n        assign_result = self.assigner.assign(pred_instances, gt_instances)\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n\n        target_map = concat_anchors.new_zeros(\n            concat_anchors.size(0), self.num_attrib)\n\n        target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(\n            sampling_result.pos_priors, sampling_result.pos_gt_bboxes,\n            anchor_strides[sampling_result.pos_inds])\n\n        target_map[sampling_result.pos_inds, 4] = 1\n\n        gt_labels_one_hot = F.one_hot(\n            gt_labels, num_classes=self.num_classes).float()\n        if self.one_hot_smoother != 0:  # label smooth\n            gt_labels_one_hot = gt_labels_one_hot * (\n                1 - self.one_hot_smoother\n            ) + self.one_hot_smoother / self.num_classes\n        target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[\n            sampling_result.pos_assigned_gt_inds]\n\n        neg_map = concat_anchors.new_zeros(\n            concat_anchors.size(0), dtype=torch.uint8)\n        neg_map[sampling_result.neg_inds] = 1\n\n        return target_map, neg_map\n\n    def responsible_flags(self, featmap_sizes: List[tuple], gt_bboxes: Tensor,\n                          device: str) -> List[Tensor]:\n        \"\"\"Generate responsible anchor flags of grid cells in multiple scales.\n\n        Args:\n            featmap_sizes (List[tuple]): List of feature map sizes in multiple\n                feature levels.\n            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).\n            device (str): Device where the anchors will be put on.\n\n        Return:\n            List[Tensor]: responsible flags of anchors in multiple level\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_responsible_flags = []\n        for i in range(self.num_levels):\n            anchor_stride = self.prior_generator.strides[i]\n            feat_h, feat_w = featmap_sizes[i]\n            gt_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)\n            gt_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)\n            gt_grid_x = torch.floor(gt_cx / anchor_stride[0]).long()\n            gt_grid_y = torch.floor(gt_cy / anchor_stride[1]).long()\n            # row major indexing\n            gt_bboxes_grid_idx = gt_grid_y * 
feat_w + gt_grid_x\n\n            responsible_grid = torch.zeros(\n                feat_h * feat_w, dtype=torch.uint8, device=device)\n            responsible_grid[gt_bboxes_grid_idx] = 1\n\n            responsible_grid = responsible_grid[:, None].expand(\n                responsible_grid.size(0),\n                self.prior_generator.num_base_priors[i]).contiguous().view(-1)\n\n            multi_level_responsible_flags.append(responsible_grid)\n        return multi_level_responsible_flags\n"
  },
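The grid assignment performed per level by `YOLOV3Head.responsible_flags` in the file above can be summarized as: each ground-truth box marks the feature-map cell containing its center as "responsible", and that flag is repeated for every base prior anchored in the cell. Below is a minimal, standalone sketch for a single level with a scalar stride; the function name and shapes are illustrative assumptions, not mmdetection code.

    # Sketch only: single-level version of the per-level loop in
    # YOLOV3Head.responsible_flags(); names are hypothetical.
    import torch

    def responsible_flags_single_level(gt_bboxes: torch.Tensor, feat_h: int,
                                       feat_w: int, stride: int,
                                       num_base_priors: int) -> torch.Tensor:
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5
        gt_grid_x = torch.floor(gt_cx / stride).long()
        gt_grid_y = torch.floor(gt_cy / stride).long()
        # row-major cell index, as in `gt_grid_y * feat_w + gt_grid_x`
        idx = gt_grid_y * feat_w + gt_grid_x
        flags = torch.zeros(feat_h * feat_w, dtype=torch.uint8)
        flags[idx] = 1
        # repeat the cell flag for every base prior in that cell
        return flags[:, None].expand(feat_h * feat_w,
                                     num_base_priors).contiguous().view(-1)

    # toy example: one box centered at (70, 40) on an 8x8 map with stride 32
    flags = responsible_flags_single_level(
        torch.tensor([[60., 30., 80., 50.]]), 8, 8, 32, num_base_priors=3)
    assert flags.sum() == 3  # the 3 priors of cell (row 1, col 2) are responsible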
  {
    "path": "mmdet/models/dense_heads/yolof_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, is_norm\nfrom mmengine.model import bias_init_with_prob, constant_init, normal_init\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean\nfrom ..task_modules.prior_generators import anchor_inside_flags\nfrom ..utils import levels_to_images, multi_apply, unmap\nfrom .anchor_head import AnchorHead\n\nINF = 1e8\n\n\n@MODELS.register_module()\nclass YOLOFHead(AnchorHead):\n    \"\"\"Detection Head of `YOLOF <https://arxiv.org/abs/2103.09460>`_\n\n    Args:\n        num_classes (int): The number of object classes (w/o background)\n        in_channels (list[int]): The number of input channels per scale.\n        cls_num_convs (int): The number of convolutions of cls branch.\n           Defaults to 2.\n        reg_num_convs (int): The number of convolutions of reg branch.\n           Defaults to 4.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. Defaults to ``dict(type='BN', requires_grad=True)``.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 in_channels: List[int],\n                 num_cls_convs: int = 2,\n                 num_reg_convs: int = 4,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 **kwargs) -> None:\n        self.num_cls_convs = num_cls_convs\n        self.num_reg_convs = num_reg_convs\n        self.norm_cfg = norm_cfg\n        super().__init__(\n            num_classes=num_classes, in_channels=in_channels, **kwargs)\n\n    def _init_layers(self) -> None:\n        cls_subnet = []\n        bbox_subnet = []\n        for i in range(self.num_cls_convs):\n            cls_subnet.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    kernel_size=3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        for i in range(self.num_reg_convs):\n            bbox_subnet.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    kernel_size=3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        self.cls_subnet = nn.Sequential(*cls_subnet)\n        self.bbox_subnet = nn.Sequential(*bbox_subnet)\n        self.cls_score = nn.Conv2d(\n            self.in_channels,\n            self.num_base_priors * self.num_classes,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n        self.bbox_pred = nn.Conv2d(\n            self.in_channels,\n            self.num_base_priors * 4,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n        self.object_pred = nn.Conv2d(\n            self.in_channels,\n            self.num_base_priors,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n\n    def init_weights(self) -> None:\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n        # Use prior in model initialization to improve stability\n        bias_cls = bias_init_with_prob(0.01)\n        
torch.nn.init.constant_(self.cls_score.bias, bias_cls)\n\n    def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                normalized_cls_score (Tensor): Normalized Cls scores for a \\\n                    single scale level, the channels number is \\\n                    num_base_priors * num_classes.\n                bbox_reg (Tensor): Box energies / deltas for a single scale \\\n                    level, the channels number is num_base_priors * 4.\n        \"\"\"\n        cls_score = self.cls_score(self.cls_subnet(x))\n        N, _, H, W = cls_score.shape\n        cls_score = cls_score.view(N, -1, self.num_classes, H, W)\n\n        reg_feat = self.bbox_subnet(x)\n        bbox_reg = self.bbox_pred(reg_feat)\n        objectness = self.object_pred(reg_feat)\n\n        # implicit objectness\n        objectness = objectness.view(N, -1, 1, H, W)\n        normalized_cls_score = cls_score + objectness - torch.log(\n            1. + torch.clamp(cls_score.exp(), max=INF) +\n            torch.clamp(objectness.exp(), max=INF))\n        normalized_cls_score = normalized_cls_score.view(N, -1, H, W)\n        return normalized_cls_score, bbox_reg\n\n    def loss_by_feat(\n            self,\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == 1\n        assert self.prior_generator.num_levels == 1\n\n        device = cls_scores[0].device\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        # The output level is always 1\n        anchor_list = [anchors[0] for anchors in anchor_list]\n        valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list]\n\n        cls_scores_list = levels_to_images(cls_scores)\n        bbox_preds_list = levels_to_images(bbox_preds)\n\n        cls_reg_targets = self.get_targets(\n            cls_scores_list,\n            bbox_preds_list,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        if cls_reg_targets is None:\n            return None\n        (batch_labels, batch_label_weights, avg_factor, batch_bbox_weights,\n         batch_pos_predicted_boxes, batch_target_boxes) = cls_reg_targets\n\n        flatten_labels = batch_labels.reshape(-1)\n        batch_label_weights = batch_label_weights.reshape(-1)\n        cls_score = cls_scores[0].permute(0, 2, 3,\n                                          1).reshape(-1, self.cls_out_channels)\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        # classification loss\n        loss_cls = self.loss_cls(\n            cls_score,\n            flatten_labels,\n            batch_label_weights,\n            avg_factor=avg_factor)\n\n        # regression loss\n        if batch_pos_predicted_boxes.shape[0] == 0:\n            # no pos sample\n            loss_bbox = batch_pos_predicted_boxes.sum() * 0\n        else:\n            loss_bbox = self.loss_bbox(\n                batch_pos_predicted_boxes,\n                batch_target_boxes,\n                batch_bbox_weights.float(),\n                avg_factor=avg_factor)\n\n        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)\n\n    def get_targets(self,\n                    cls_scores_list: List[Tensor],\n                    bbox_preds_list: List[Tensor],\n                    anchor_list: List[Tensor],\n                    valid_flag_list: List[Tensor],\n                    batch_gt_instances: InstanceList,\n                    batch_img_metas: List[dict],\n                    batch_gt_instances_ignore: OptInstanceList = None,\n                    unmap_outputs: bool = True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            cls_scores_list (list[Tensor]): Classification scores of\n                each image. Each is a 2D-tensor whose shape is\n                (h * w, num_anchors * num_classes).\n            bbox_preds_list (list[Tensor]): Bbox preds of each image.\n                Each is a 2D-tensor whose shape is (h * w, num_anchors * 4).\n            anchor_list (list[Tensor]): Anchors of each image. Each element\n                is a tensor of shape (h * w * num_anchors, 4).\n            valid_flag_list (list[Tensor]): Valid flags of each image. 
Each\n               element is a tensor of shape (h * w * num_anchors, ).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - batch_labels (Tensor): Labels of all images, a tensor of \\\n                    shape (batch, h * w * num_anchors).\n                - batch_label_weights (Tensor): Label weights of all images, \\\n                    a tensor of shape (batch, h * w * num_anchors).\n                - avg_factor (int): Averaging factor accumulated over all \\\n                    images, used to normalize the losses.\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having an HxW\n                dimension). The results will be concatenated at the end.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # compute targets for each image\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n        results = multi_apply(\n            self._get_targets_single,\n            bbox_preds_list,\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore,\n            unmap_outputs=unmap_outputs)\n        (all_labels, all_label_weights, pos_inds, neg_inds,\n         sampling_results_list) = results[:5]\n        # Get `avg_factor` of all images, calculated in `SamplingResult`.\n        # When using a sampling method, avg_factor is usually the sum of\n        # positive and negative priors. 
When using `PseudoSampler`,\n        # `avg_factor` is usually equal to the number of positive priors.\n        avg_factor = sum(\n            [results.avg_factor for results in sampling_results_list])\n        rest_results = list(results[5:])  # user-added return values\n\n        batch_labels = torch.stack(all_labels, 0)\n        batch_label_weights = torch.stack(all_label_weights, 0)\n\n        res = (batch_labels, batch_label_weights, avg_factor)\n        for i, rests in enumerate(rest_results):  # user-added return values\n            rest_results[i] = torch.cat(rests, 0)\n\n        return res + tuple(rest_results)\n\n    def _get_targets_single(self,\n                            bbox_preds: Tensor,\n                            flat_anchors: Tensor,\n                            valid_flags: Tensor,\n                            gt_instances: InstanceData,\n                            img_meta: dict,\n                            gt_instances_ignore: Optional[InstanceData] = None,\n                            unmap_outputs: bool = True) -> tuple:\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            bbox_preds (Tensor): Bbox predictions of the image, whose\n                shape is (h * w, 4).\n            flat_anchors (Tensor): Anchors of the image, whose shape is\n                (h * w * num_anchors, 4).\n            valid_flags (Tensor): Valid flags of the image, whose shape is\n                (h * w * num_anchors,).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should include ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for the current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple:\n                labels (Tensor): Labels of the image, whose shape is\n                    (h * w * num_anchors, ).\n                label_weights (Tensor): Label weights of the image, whose\n                    shape is (h * w * num_anchors, ).\n                pos_inds (Tensor): Indices of positive anchors in the image.\n                neg_inds (Tensor): Indices of negative anchors in the image.\n                sampling_result (:obj:`SamplingResult`): Sampling result.\n                pos_bbox_weights (Tensor): The weights used to calculate\n                    the bbox branch loss, whose shape is (num, ).\n                pos_predicted_boxes (Tensor): Predicted boxes used to\n                    calculate the bbox branch loss, whose shape is\n                    (num, 4).\n                pos_target_boxes (Tensor): Target boxes used to calculate\n                    the bbox branch loss, whose shape is (num, 4).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg['allowed_border'])\n        if not inside_flags.any():\n            raise ValueError(\n                'There is no valid anchor inside the image boundary. 
Please '\n                'check the image size and anchor sizes, or set '\n                '``allowed_border`` to -1 to skip the condition.')\n\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n        bbox_preds = bbox_preds.reshape(-1, 4)\n        bbox_preds = bbox_preds[inside_flags, :]\n\n        # decoded bbox\n        decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds)\n        pred_instances = InstanceData(\n            priors=anchors, decoder_priors=decoder_bbox_preds)\n        assign_result = self.assigner.assign(pred_instances, gt_instances,\n                                             gt_instances_ignore)\n\n        pos_bbox_weights = assign_result.get_extra_property('pos_idx')\n        pos_predicted_boxes = assign_result.get_extra_property(\n            'pos_predicted_boxes')\n        pos_target_boxes = assign_result.get_extra_property('target_boxes')\n\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n        num_valid_anchors = anchors.shape[0]\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            labels[pos_inds] = sampling_result.pos_gt_labels\n            if self.train_cfg['pos_weight'] <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg['pos_weight']\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n\n        return (labels, label_weights, pos_inds, neg_inds, sampling_result,\n                pos_bbox_weights, pos_predicted_boxes, pos_target_boxes)\n"
  },
  {
    "path": "mmdet/models/dense_heads/yolox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.ops.nms import batched_nms\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import bias_init_with_prob\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import bbox_xyxy_to_cxcywh\nfrom mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,\n                         OptMultiConfig, reduce_mean)\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom ..task_modules.samplers import PseudoSampler\nfrom ..utils import multi_apply\nfrom .base_dense_head import BaseDenseHead\n\n\n@MODELS.register_module()\nclass YOLOXHead(BaseDenseHead):\n    \"\"\"YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels in stacking convs.\n            Defaults to 256\n        stacked_convs (int): Number of stacking convs of the head.\n            Defaults to (8, 16, 32).\n        strides (Sequence[int]): Downsample factor of each feature map.\n             Defaults to None.\n        use_depthwise (bool): Whether to depthwise separable convolution in\n            blocks. Defaults to False.\n        dcn_on_last_conv (bool): If true, use dcn in the last layer of\n            towers. Defaults to False.\n        conv_bias (bool or str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Defaults to \"auto\".\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.\n            Defaults to None.\n        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.\n        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.\n        loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss.\n        loss_l1 (:obj:`ConfigDict` or dict): Config of L1 loss.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            anchor head. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            anchor head. 
Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        in_channels: int,\n        feat_channels: int = 256,\n        stacked_convs: int = 2,\n        strides: Sequence[int] = (8, 16, 32),\n        use_depthwise: bool = False,\n        dcn_on_last_conv: bool = False,\n        conv_bias: Union[bool, str] = 'auto',\n        conv_cfg: OptConfigType = None,\n        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),\n        act_cfg: ConfigType = dict(type='Swish'),\n        loss_cls: ConfigType = dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='sum',\n            loss_weight=1.0),\n        loss_bbox: ConfigType = dict(\n            type='IoULoss',\n            mode='square',\n            eps=1e-16,\n            reduction='sum',\n            loss_weight=5.0),\n        loss_obj: ConfigType = dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='sum',\n            loss_weight=1.0),\n        loss_l1: ConfigType = dict(\n            type='L1Loss', reduction='sum', loss_weight=1.0),\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        init_cfg: OptMultiConfig = dict(\n            type='Kaiming',\n            layer='Conv2d',\n            a=math.sqrt(5),\n            distribution='uniform',\n            mode='fan_in',\n            nonlinearity='leaky_relu')\n    ) -> None:\n\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.cls_out_channels = num_classes\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.use_depthwise = use_depthwise\n        self.dcn_on_last_conv = dcn_on_last_conv\n        assert conv_bias == 'auto' or isinstance(conv_bias, bool)\n        self.conv_bias = conv_bias\n        self.use_sigmoid_cls = True\n\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.loss_cls: nn.Module = MODELS.build(loss_cls)\n        self.loss_bbox: nn.Module = MODELS.build(loss_bbox)\n        self.loss_obj: nn.Module = MODELS.build(loss_obj)\n\n        self.use_l1 = False  # This flag will be modified by hooks.\n        self.loss_l1: nn.Module = MODELS.build(loss_l1)\n\n        self.prior_generator = MlvlPointGenerator(strides, offset=0)\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n\n        if self.train_cfg:\n            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])\n            # YOLOX does not support sampling\n            self.sampler = PseudoSampler()\n\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize heads for all level feature maps.\"\"\"\n        self.multi_level_cls_convs = nn.ModuleList()\n        self.multi_level_reg_convs = nn.ModuleList()\n        self.multi_level_conv_cls = nn.ModuleList()\n        self.multi_level_conv_reg = nn.ModuleList()\n        self.multi_level_conv_obj = nn.ModuleList()\n        for _ in self.strides:\n            self.multi_level_cls_convs.append(self._build_stacked_convs())\n            self.multi_level_reg_convs.append(self._build_stacked_convs())\n            conv_cls, conv_reg, conv_obj = 
self._build_predictor()\n            self.multi_level_conv_cls.append(conv_cls)\n            self.multi_level_conv_reg.append(conv_reg)\n            self.multi_level_conv_obj.append(conv_obj)\n\n    def _build_stacked_convs(self) -> nn.Sequential:\n        \"\"\"Initialize conv layers of a single level head.\"\"\"\n        conv = DepthwiseSeparableConvModule \\\n            if self.use_depthwise else ConvModule\n        stacked_convs = []\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            stacked_convs.append(\n                conv(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg,\n                    bias=self.conv_bias))\n        return nn.Sequential(*stacked_convs)\n\n    def _build_predictor(self) -> Tuple[nn.Module, nn.Module, nn.Module]:\n        \"\"\"Initialize predictor layers of a single level head.\"\"\"\n        conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)\n        conv_reg = nn.Conv2d(self.feat_channels, 4, 1)\n        conv_obj = nn.Conv2d(self.feat_channels, 1, 1)\n        return conv_cls, conv_reg, conv_obj\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        super(YOLOXHead, self).init_weights()\n        # Use prior in model initialization to improve stability\n        bias_init = bias_init_with_prob(0.01)\n        for conv_cls, conv_obj in zip(self.multi_level_conv_cls,\n                                      self.multi_level_conv_obj):\n            conv_cls.bias.data.fill_(bias_init)\n            conv_obj.bias.data.fill_(bias_init)\n\n    def forward_single(self, x: Tensor, cls_convs: nn.Module,\n                       reg_convs: nn.Module, conv_cls: nn.Module,\n                       conv_reg: nn.Module,\n                       conv_obj: nn.Module) -> Tuple[Tensor, Tensor, Tensor]:\n        \"\"\"Forward feature of a single scale level.\"\"\"\n\n        cls_feat = cls_convs(x)\n        reg_feat = reg_convs(x)\n\n        cls_score = conv_cls(cls_feat)\n        bbox_pred = conv_reg(reg_feat)\n        objectness = conv_obj(reg_feat)\n\n        return cls_score, bbox_pred, objectness\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (Tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n        Returns:\n            Tuple[List]: A tuple of multi-level classification scores, bbox\n            predictions, and objectnesses.\n        \"\"\"\n\n        return multi_apply(self.forward_single, x, self.multi_level_cls_convs,\n                           self.multi_level_reg_convs,\n                           self.multi_level_conv_cls,\n                           self.multi_level_conv_reg,\n                           self.multi_level_conv_obj)\n\n    def predict_by_feat(self,\n                        cls_scores: List[Tensor],\n                        bbox_preds: List[Tensor],\n                        objectnesses: Optional[List[Tensor]],\n                        batch_img_metas: Optional[List[dict]] = 
None,\n                        cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False,\n                        with_nms: bool = True) -> List[InstanceData]:\n        \"\"\"Transform a batch of output features extracted by the head into\n        bbox results.\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            objectnesses (list[Tensor], Optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, 1, H, W).\n            batch_img_metas (list[dict], Optional): Batch image meta info.\n                Defaults to None.\n            cfg (ConfigDict, optional): Test / postprocessing\n                configuration, if None, test_cfg would be used.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`InstanceData`]: Object detection results of each image\n            after the post process. Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(objectnesses)\n        cfg = self.test_cfg if cfg is None else cfg\n\n        num_imgs = len(batch_img_metas)\n        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device,\n            with_stride=True)\n\n        # flatten cls_scores, bbox_preds and objectness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_objectness = [\n            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)\n            for objectness in objectnesses\n        ]\n\n        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)\n        flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()\n        flatten_priors = torch.cat(mlvl_priors)\n\n        flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)\n\n        result_list = []\n        for img_id, img_meta in enumerate(batch_img_metas):\n            max_scores, labels = torch.max(flatten_cls_scores[img_id], 1)\n            valid_mask = flatten_objectness[\n                img_id] * max_scores >= cfg.score_thr\n            results = InstanceData(\n      
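          # Final score = max class prob * objectness (both sigmoid);\n                # candidates were filtered by cfg.score_thr just above.\n      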
          bboxes=flatten_bboxes[img_id][valid_mask],\n                scores=max_scores[valid_mask] *\n                flatten_objectness[img_id][valid_mask],\n                labels=labels[valid_mask])\n\n            result_list.append(\n                self._bbox_post_process(\n                    results=results,\n                    cfg=cfg,\n                    rescale=rescale,\n                    with_nms=with_nms,\n                    img_meta=img_meta))\n\n        return result_list\n\n    def _bbox_decode(self, priors: Tensor, bbox_preds: Tensor) -> Tensor:\n        \"\"\"Decode regression results (delta_x, delta_y, w, h) to bboxes (tl_x,\n        tl_y, br_x, br_y).\n\n        Args:\n            priors (Tensor): Center priors of an image, has shape\n                (num_priors, 4) in [x, y, stride_w, stride_h] format.\n            bbox_preds (Tensor): Box energies / deltas for all instances,\n                has shape (batch_size, num_instances, 4).\n\n        Returns:\n            Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has\n            shape (batch_size, num_instances, 4).\n        \"\"\"\n        xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2]\n        whs = bbox_preds[..., 2:].exp() * priors[:, 2:]\n\n        tl_x = (xys[..., 0] - whs[..., 0] / 2)\n        tl_y = (xys[..., 1] - whs[..., 1] / 2)\n        br_x = (xys[..., 0] + whs[..., 0] / 2)\n        br_y = (xys[..., 1] + whs[..., 1] / 2)\n\n        decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)\n        return decoded_bboxes\n\n    def _bbox_post_process(self,\n                           results: InstanceData,\n                           cfg: ConfigDict,\n                           rescale: bool = False,\n                           with_nms: bool = True,\n                           img_meta: Optional[dict] = None) -> InstanceData:\n        \"\"\"Bbox post-processing method.\n\n        The boxes are rescaled to the original image scale and NMS is\n        applied. `with_nms` is usually set to False for aug test.\n\n        Args:\n            results (:obj:`InstanceData`): Detection instance results,\n                each item has shape (num_bboxes, ).\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before returning boxes.\n                Defaults to True.\n            img_meta (dict, optional): Image meta info. 
Defaults to None.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            results.bboxes /= results.bboxes.new_tensor(\n                img_meta['scale_factor']).repeat((1, 2))\n\n        if with_nms and results.bboxes.numel() > 0:\n            det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores,\n                                                results.labels, cfg.nms)\n            results = results[keep_idxs]\n            # some nms would reweight the score, such as softnms\n            results.scores = det_bboxes[:, -1]\n        return results\n\n    def loss_by_feat(\n            self,\n            cls_scores: Sequence[Tensor],\n            bbox_preds: Sequence[Tensor],\n            objectnesses: Sequence[Tensor],\n            batch_gt_instances: Sequence[InstanceData],\n            batch_img_metas: Sequence[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (Sequence[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_priors * num_classes.\n            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_priors * 4.\n            objectnesses (Sequence[Tensor]): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, 1, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n        Returns:\n            dict[str, Tensor]: A dictionary of losses.\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        if batch_gt_instances_ignore is None:\n            batch_gt_instances_ignore = [None] * num_imgs\n\n        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device,\n            with_stride=True)\n\n        flatten_cls_preds = [\n            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                 self.cls_out_channels)\n            for cls_pred in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_objectness = [\n            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)\n            for objectness in objectnesses\n        ]\n\n        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)\n        flatten_objectness = torch.cat(flatten_objectness, dim=1)\n        flatten_priors = torch.cat(mlvl_priors)\n        flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)\n\n        (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets,\n         num_fg_imgs) = multi_apply(\n             self._get_targets_single,\n             flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1),\n             flatten_cls_preds.detach(), flatten_bboxes.detach(),\n             flatten_objectness.detach(), batch_gt_instances, batch_img_metas,\n             batch_gt_instances_ignore)\n\n        # The experimental results show that 'reduce_mean' can improve\n        # performance on the COCO dataset.\n        num_pos = torch.tensor(\n            sum(num_fg_imgs),\n            dtype=torch.float,\n            device=flatten_cls_preds.device)\n        num_total_samples = max(reduce_mean(num_pos), 1.0)\n\n        pos_masks = torch.cat(pos_masks, 0)\n        cls_targets = torch.cat(cls_targets, 0)\n        obj_targets = torch.cat(obj_targets, 0)\n        bbox_targets = torch.cat(bbox_targets, 0)\n        if self.use_l1:\n            l1_targets = torch.cat(l1_targets, 0)\n\n        loss_obj = self.loss_obj(flatten_objectness.view(-1, 1),\n                                 obj_targets) / num_total_samples\n        if num_pos > 0:\n            loss_cls = self.loss_cls(\n                flatten_cls_preds.view(-1, self.num_classes)[pos_masks],\n                cls_targets) / num_total_samples\n            loss_bbox = self.loss_bbox(\n                flatten_bboxes.view(-1, 4)[pos_masks],\n                bbox_targets) / num_total_samples\n        else:\n            # Avoid cls and reg branch not participating in the gradient\n            # propagation when there is no ground-truth in the images.\n            # For more details, please refer to\n            # https://github.com/open-mmlab/mmdetection/issues/7298\n            loss_cls = flatten_cls_preds.sum() * 0\n            loss_bbox = flatten_bboxes.sum() * 0\n\n        loss_dict = dict(\n            loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj)\n\n        if self.use_l1:\n            if num_pos > 0:\n                loss_l1 = self.loss_l1(\n  
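                  # L1 loss acts on raw bbox offsets (before decoding) and is\n                    # computed only if use_l1 is enabled (set by hooks).\n  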
                  flatten_bbox_preds.view(-1, 4)[pos_masks],\n                    l1_targets) / num_total_samples\n            else:\n                # Avoid cls and reg branch not participating in the gradient\n                # propagation when there is no ground-truth in the images.\n                # For more details, please refer to\n                # https://github.com/open-mmlab/mmdetection/issues/7298\n                loss_l1 = flatten_bbox_preds.sum() * 0\n            loss_dict.update(loss_l1=loss_l1)\n\n        return loss_dict\n\n    @torch.no_grad()\n    def _get_targets_single(\n            self,\n            priors: Tensor,\n            cls_preds: Tensor,\n            decoded_bboxes: Tensor,\n            objectness: Tensor,\n            gt_instances: InstanceData,\n            img_meta: dict,\n            gt_instances_ignore: Optional[InstanceData] = None) -> tuple:\n        \"\"\"Compute classification, regression, and objectness targets for\n        priors in a single image.\n\n        Args:\n            priors (Tensor): All priors of one image, a 2D-Tensor with shape\n                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.\n            cls_preds (Tensor): Classification predictions of one image,\n                a 2D-Tensor with shape [num_priors, num_classes].\n            decoded_bboxes (Tensor): Decoded bbox predictions of one image,\n                a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,\n                br_x, br_y] format.\n            objectness (Tensor): Objectness predictions of one image,\n                a 1D-Tensor with shape [num_priors].\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It should include ``bboxes`` and ``labels``\n                attributes.\n            img_meta (dict): Meta information for the current image.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n        Returns:\n            tuple:\n                foreground_mask (list[Tensor]): Binary mask of foreground\n                targets.\n                cls_target (list[Tensor]): Classification targets of an image.\n                obj_target (list[Tensor]): Objectness targets of an image.\n                bbox_target (list[Tensor]): BBox targets of an image.\n                l1_target (int): BBox L1 targets of an image.\n                num_pos_per_img (int): Number of positive samples in an image.\n        \"\"\"\n\n        num_priors = priors.size(0)\n        num_gts = len(gt_instances)\n        # No target\n        if num_gts == 0:\n            cls_target = cls_preds.new_zeros((0, self.num_classes))\n            bbox_target = cls_preds.new_zeros((0, 4))\n            l1_target = cls_preds.new_zeros((0, 4))\n            obj_target = cls_preds.new_zeros((num_priors, 1))\n            foreground_mask = cls_preds.new_zeros(num_priors).bool()\n            return (foreground_mask, cls_target, obj_target, bbox_target,\n                    l1_target, 0)\n\n        # YOLOX uses center priors with 0.5 offset to assign targets,\n        # but use center priors without offset to regress bboxes.\n        offset_priors = torch.cat(\n            [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1)\n\n        scores = cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid()\n        pred_instances = InstanceData(\n            bboxes=decoded_bboxes, scores=scores.sqrt_(), priors=offset_priors)\n        assign_result = self.assigner.assign(\n            pred_instances=pred_instances,\n            gt_instances=gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n\n        sampling_result = self.sampler.sample(assign_result, pred_instances,\n                                              gt_instances)\n        pos_inds = sampling_result.pos_inds\n        num_pos_per_img = pos_inds.size(0)\n\n        pos_ious = assign_result.max_overlaps[pos_inds]\n        # IOU aware classification score\n        cls_target = F.one_hot(sampling_result.pos_gt_labels,\n                               self.num_classes) * pos_ious.unsqueeze(-1)\n        obj_target = torch.zeros_like(objectness).unsqueeze(-1)\n        obj_target[pos_inds] = 1\n        bbox_target = sampling_result.pos_gt_bboxes\n        l1_target = cls_preds.new_zeros((num_pos_per_img, 4))\n        if self.use_l1:\n            l1_target = self._get_l1_target(l1_target, bbox_target,\n                                            priors[pos_inds])\n        foreground_mask = torch.zeros_like(objectness).to(torch.bool)\n        foreground_mask[pos_inds] = 1\n        return (foreground_mask, cls_target, obj_target, bbox_target,\n                l1_target, num_pos_per_img)\n\n    def _get_l1_target(self,\n                       l1_target: Tensor,\n                       gt_bboxes: Tensor,\n                       priors: Tensor,\n                       eps: float = 1e-8) -> Tensor:\n        \"\"\"Convert gt bboxes to center offset and log width height.\"\"\"\n        gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes)\n        l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:]\n        l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps)\n        return l1_target\n"
  },
  {
    "path": "mmdet/models/detectors/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .atss import ATSS\nfrom .autoassign import AutoAssign\nfrom .base import BaseDetector\nfrom .base_detr import DetectionTransformer\nfrom .boxinst import BoxInst\nfrom .cascade_rcnn import CascadeRCNN\nfrom .centernet import CenterNet\nfrom .condinst import CondInst\nfrom .conditional_detr import ConditionalDETR\nfrom .cornernet import CornerNet\nfrom .crowddet import CrowdDet\nfrom .d2_wrapper import Detectron2Wrapper\nfrom .dab_detr import DABDETR\nfrom .ddod import DDOD\nfrom .deformable_detr import DeformableDETR\nfrom .detr import DETR\nfrom .dino import DINO\nfrom .fast_rcnn import FastRCNN\nfrom .faster_rcnn import FasterRCNN\nfrom .fcos import FCOS\nfrom .fovea import FOVEA\nfrom .fsaf import FSAF\nfrom .gfl import GFL\nfrom .grid_rcnn import GridRCNN\nfrom .htc import HybridTaskCascade\nfrom .kd_one_stage import KnowledgeDistillationSingleStageDetector\nfrom .lad import LAD\nfrom .mask2former import Mask2Former\nfrom .mask_rcnn import MaskRCNN\nfrom .mask_scoring_rcnn import MaskScoringRCNN\nfrom .maskformer import MaskFormer\nfrom .nasfcos import NASFCOS\nfrom .paa import PAA\nfrom .panoptic_fpn import PanopticFPN\nfrom .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor\nfrom .point_rend import PointRend\nfrom .queryinst import QueryInst\nfrom .reppoints_detector import RepPointsDetector\nfrom .retinanet import RetinaNet\nfrom .rpn import RPN\nfrom .rtmdet import RTMDet\nfrom .scnet import SCNet\nfrom .semi_base import SemiBaseDetector\nfrom .single_stage import SingleStageDetector\nfrom .soft_teacher import SoftTeacher\nfrom .solo import SOLO\nfrom .solov2 import SOLOv2\nfrom .sparse_rcnn import SparseRCNN\nfrom .tood import TOOD\nfrom .trident_faster_rcnn import TridentFasterRCNN\nfrom .two_stage import TwoStageDetector\nfrom .vfnet import VFNet\nfrom .yolact import YOLACT\nfrom .yolo import YOLOV3\nfrom .yolof import YOLOF\nfrom .yolox import YOLOX\nfrom .crosskd_single_stage import CrossKDSingleStageDetector\nfrom .crosskd_retinanet import CrossKDRetinaNet\nfrom .crosskd_gfl import CrossKDGFL\nfrom .crosskd_atss import CrossKDATSS\nfrom .crosskd_fcos import CrossKDFCOS\n\n__all__ = [\n    'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',\n    'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',\n    'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',\n    'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',\n    'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',\n    'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',\n    'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',\n    'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',\n    'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',\n    'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',\n    'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'CrossKDGFL',\n    'CrossKDSingleStageDetector', 'CrossKDRetinaNet', 'CrossKDATSS', 'CrossKDFCOS'\n]\n"
  },
  {
    "path": "mmdet/models/detectors/atss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass ATSS(SingleStageDetector):\n    \"\"\"Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of ATSS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of ATSS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/autoassign.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass AutoAssign(SingleStageDetector):\n    \"\"\"Implementation of `AutoAssign: Differentiable Label Assignment for Dense\n    Object Detection <https://arxiv.org/abs/2007.03496>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of AutoAssign. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of AutoAssign. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/base.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Dict, List, Tuple, Union\n\nimport torch\nfrom mmengine.model import BaseModel\nfrom torch import Tensor\n\nfrom mmdet.structures import DetDataSample, OptSampleList, SampleList\nfrom mmdet.utils import InstanceList, OptConfigType, OptMultiConfig\nfrom ..utils import samplelist_boxtype2tensor\n\nForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],\n                       Tuple[torch.Tensor], torch.Tensor]\n\n\nclass BaseDetector(BaseModel, metaclass=ABCMeta):\n    \"\"\"Base class for detectors.\n\n    Args:\n       data_preprocessor (dict or ConfigDict, optional): The pre-process\n           config of :class:`BaseDataPreprocessor`.  it usually includes,\n            ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.\n       init_cfg (dict or ConfigDict, optional): the config to control the\n           initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n\n    @property\n    def with_neck(self) -> bool:\n        \"\"\"bool: whether the detector has a neck\"\"\"\n        return hasattr(self, 'neck') and self.neck is not None\n\n    # TODO: these properties need to be carefully handled\n    # for both single stage & two stage detectors\n    @property\n    def with_shared_head(self) -> bool:\n        \"\"\"bool: whether the detector has a shared head in the RoI Head\"\"\"\n        return hasattr(self, 'roi_head') and self.roi_head.with_shared_head\n\n    @property\n    def with_bbox(self) -> bool:\n        \"\"\"bool: whether the detector has a bbox head\"\"\"\n        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)\n                or (hasattr(self, 'bbox_head') and self.bbox_head is not None))\n\n    @property\n    def with_mask(self) -> bool:\n        \"\"\"bool: whether the detector has a mask head\"\"\"\n        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)\n                or (hasattr(self, 'mask_head') and self.mask_head is not None))\n\n    def forward(self,\n                inputs: torch.Tensor,\n                data_samples: OptSampleList = None,\n                mode: str = 'tensor') -> ForwardResults:\n        \"\"\"The unified entry for a forward process in both training and test.\n\n        The method should accept three modes: \"tensor\", \"predict\" and \"loss\":\n\n        - \"tensor\": Forward the whole network and return tensor or tuple of\n        tensor without any post-processing, same as a common nn.Module.\n        - \"predict\": Forward and return the predictions, which are fully\n        processed to a list of :obj:`DetDataSample`.\n        - \"loss\": Forward and return a dict of losses according to the given\n        inputs and data samples.\n\n        Note that this method doesn't handle either back propagation or\n        parameter update, which are supposed to be done in :meth:`train_step`.\n\n        Args:\n            inputs (torch.Tensor): The input tensor with shape\n                (N, C, ...) in general.\n            data_samples (list[:obj:`DetDataSample`], optional): A batch of\n                data samples that contain annotations and predictions.\n                Defaults to None.\n            mode (str): Return what kind of value. 
Defaults to 'tensor'.\n\n        Returns:\n            The return type depends on ``mode``.\n\n            - If ``mode=\"tensor\"``, return a tensor or a tuple of tensor.\n            - If ``mode=\"predict\"``, return a list of :obj:`DetDataSample`.\n            - If ``mode=\"loss\"``, return a dict of tensor.\n        \"\"\"\n        if mode == 'loss':\n            return self.loss(inputs, data_samples)\n        elif mode == 'predict':\n            return self.predict(inputs, data_samples)\n        elif mode == 'tensor':\n            return self._forward(inputs, data_samples)\n        else:\n            raise RuntimeError(f'Invalid mode \"{mode}\". '\n                               'Only supports loss, predict and tensor mode')\n\n    @abstractmethod\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, tuple]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\"\"\"\n        pass\n\n    @abstractmethod\n    def predict(self, batch_inputs: Tensor,\n                batch_data_samples: SampleList) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\"\"\"\n        pass\n\n    @abstractmethod\n    def _forward(self,\n                 batch_inputs: Tensor,\n                 batch_data_samples: OptSampleList = None):\n        \"\"\"Network forward process.\n\n        Usually includes backbone, neck and head forward without any post-\n        processing.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def extract_feat(self, batch_inputs: Tensor):\n        \"\"\"Extract features from images.\"\"\"\n        pass\n\n    def add_pred_to_datasample(self, data_samples: SampleList,\n                               results_list: InstanceList) -> SampleList:\n        \"\"\"Add predictions to `DetDataSample`.\n\n        Args:\n            data_samples (list[:obj:`DetDataSample`], optional): A batch of\n                data samples that contain annotations and predictions.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances'. And the ``pred_instances`` usually\n            contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        for data_sample, pred_instances in zip(data_samples, results_list):\n            data_sample.pred_instances = pred_instances\n        samplelist_boxtype2tensor(data_samples)\n        return data_samples\n"
  },
  {
    "path": "mmdet/models/detectors/base_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Dict, List, Tuple, Union\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList, SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .base import BaseDetector\n\n\n@MODELS.register_module()\nclass DetectionTransformer(BaseDetector, metaclass=ABCMeta):\n    r\"\"\"Base class for Detection Transformer.\n\n    In Detection Transformer, an encoder is used to process output features of\n    neck, then several queries interact with the encoder features using a\n    decoder and do the regression and classification with the bounding box\n    head.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): Config of the backbone.\n        neck (:obj:`ConfigDict` or dict, optional): Config of the neck.\n            Defaults to None.\n        encoder (:obj:`ConfigDict` or dict, optional): Config of the\n            Transformer encoder. Defaults to None.\n        decoder (:obj:`ConfigDict` or dict, optional): Config of the\n            Transformer decoder. Defaults to None.\n        bbox_head (:obj:`ConfigDict` or dict, optional): Config for the\n            bounding box head module. Defaults to None.\n        positional_encoding (:obj:`ConfigDict` or dict, optional): Config\n            of the positional encoding module. Defaults to None.\n        num_queries (int, optional): Number of decoder query in Transformer.\n            Defaults to 100.\n        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of\n            the bounding box head module. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of\n            the bounding box head module. Defaults to None.\n        data_preprocessor (dict or ConfigDict, optional): The pre-process\n            config of :class:`BaseDataPreprocessor`.  it usually includes,\n            ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 encoder: OptConfigType = None,\n                 decoder: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 positional_encoding: OptConfigType = None,\n                 num_queries: int = 100,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        # process args\n        bbox_head.update(train_cfg=train_cfg)\n        bbox_head.update(test_cfg=test_cfg)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.encoder = encoder\n        self.decoder = decoder\n        self.positional_encoding = positional_encoding\n        self.num_queries = num_queries\n\n        # init model layers\n        self.backbone = MODELS.build(backbone)\n        if neck is not None:\n            self.neck = MODELS.build(neck)\n        self.bbox_head = MODELS.build(bbox_head)\n        self._init_layers()\n\n    @abstractmethod\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        pass\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (bs, dim, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n        img_feats = self.extract_feat(batch_inputs)\n        head_inputs_dict = self.forward_transformer(img_feats,\n                                                    batch_data_samples)\n        losses = self.bbox_head.loss(\n            **head_inputs_dict, batch_data_samples=batch_data_samples)\n\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the input images.\n            Each DetDataSample usually contain 'pred_instances'. 
And the\n            `pred_instances` usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        img_feats = self.extract_feat(batch_inputs)\n        head_inputs_dict = self.forward_transformer(img_feats,\n                                                    batch_data_samples)\n        results_list = self.bbox_head.predict(\n            **head_inputs_dict,\n            rescale=rescale,\n            batch_data_samples=batch_data_samples)\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n\n    def _forward(\n            self,\n            batch_inputs: Tensor,\n            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n         Args:\n            batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`], optional): The\n                batch data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            tuple[Tensor]: A tuple of features from ``bbox_head`` forward.\n        \"\"\"\n        img_feats = self.extract_feat(batch_inputs)\n        head_inputs_dict = self.forward_transformer(img_feats,\n                                                    batch_data_samples)\n        results = self.bbox_head.forward(**head_inputs_dict)\n        return results\n\n    def forward_transformer(self,\n                            img_feats: Tuple[Tensor],\n                            batch_data_samples: OptSampleList = None) -> Dict:\n        \"\"\"Forward process of Transformer, which includes four steps:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'. We\n        summarized the parameters flow of the existing DETR-like detector,\n        which can be illustrated as follow:\n\n        .. 
code:: text\n\n                 img_feats & batch_data_samples\n                               |\n                               V\n                      +-----------------+\n                      | pre_transformer |\n                      +-----------------+\n                          |          |\n                          |          V\n                          |    +-----------------+\n                          |    | forward_encoder |\n                          |    +-----------------+\n                          |             |\n                          |             V\n                          |     +---------------+\n                          |     |  pre_decoder  |\n                          |     +---------------+\n                          |         |       |\n                          V         V       |\n                      +-----------------+   |\n                      | forward_decoder |   |\n                      +-----------------+   |\n                                |           |\n                                V           V\n                               head_inputs_dict\n\n        Args:\n            img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each\n                    feature map has shape (bs, dim, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`], optional): The\n                batch data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            dict: The dictionary of bbox_head function inputs, which always\n            includes the `hidden_states` of the decoder output and may contain\n            `references` including the initial and intermediate references.\n        \"\"\"\n        encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(\n            img_feats, batch_data_samples)\n\n        encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)\n\n        tmp_dec_in, head_inputs_dict = self.pre_decoder(**encoder_outputs_dict)\n        decoder_inputs_dict.update(tmp_dec_in)\n\n        decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)\n        head_inputs_dict.update(decoder_outputs_dict)\n        return head_inputs_dict\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor, has shape (bs, dim, H, W).\n\n        Returns:\n            tuple[Tensor]: Tuple of feature maps from neck. Each feature map\n            has shape (bs, dim, H, W).\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    @abstractmethod\n    def pre_transformer(\n            self,\n            img_feats: Tuple[Tensor],\n            batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:\n        \"\"\"Process image features before feeding them to the transformer.\n\n        Args:\n            img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each\n                feature map has shape (bs, dim, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`], optional): The\n                batch data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            tuple[dict, dict]: The first dict contains the inputs of encoder\n            and the second dict contains the inputs of decoder.\n\n            - encoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_encoder()`, which includes 'feat', 'feat_mask',\n              'feat_pos', and other algorithm-specific arguments.\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_decoder()`, which includes 'memory_mask', and\n              other algorithm-specific arguments.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def forward_encoder(self, feat: Tensor, feat_mask: Tensor,\n                        feat_pos: Tensor, **kwargs) -> Dict:\n        \"\"\"Forward with Transformer encoder.\n\n        Args:\n            feat (Tensor): Sequential features, has shape (bs, num_feat_points,\n                dim).\n            feat_mask (Tensor): ByteTensor, the padding mask of the features,\n                has shape (bs, num_feat_points).\n            feat_pos (Tensor): The positional embeddings of the features, has\n                shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of encoder outputs, which includes the\n            `memory` of the encoder output and other algorithm-specific\n            arguments.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def pre_decoder(self, memory: Tensor, **kwargs) -> Tuple[Dict, Dict]:\n        \"\"\"Prepare intermediate variables before entering Transformer decoder,\n        such as `query`, `query_pos`, and `reference_points`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n\n        Returns:\n            tuple[dict, dict]: The first dict contains the inputs of decoder\n            and the second dict contains the inputs of the bbox_head function.\n\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_decoder()`, which includes 'query', 'query_pos',\n              'memory', and other algorithm-specific arguments.\n            - head_inputs_dict (dict): The keyword args dictionary of the\n              bbox_head functions, which is usually empty, or includes\n              `enc_outputs_class` and `enc_outputs_coord` when the detector\n              supports 'two stage' or 'query selection' strategies.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,\n                        **kwargs) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional queries of decoder inputs,\n                has shape (bs, num_queries, dim).\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` of the decoder output, `references` including\n            the initial and intermediate reference_points, and other\n            algorithm-specific arguments.\n        \"\"\"\n        pass\n"
  },
  {
    "path": "mmdet/models/detectors/boxinst.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@MODELS.register_module()\nclass BoxInst(SingleStageInstanceSegmentor):\n    \"\"\"Implementation of `BoxInst <https://arxiv.org/abs/2012.02310>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 mask_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/cascade_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass CascadeRCNN(TwoStageDetector):\n    r\"\"\"Implementation of `Cascade R-CNN: Delving into High Quality Object\n    Detection <https://arxiv.org/abs/1906.09756>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 rpn_head: OptConfigType = None,\n                 roi_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/centernet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass CenterNet(SingleStageDetector):\n    \"\"\"Implementation of CenterNet(Objects as Points)\n\n    <https://arxiv.org/abs/1904.07850>.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/condinst.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@MODELS.register_module()\nclass CondInst(SingleStageInstanceSegmentor):\n    \"\"\"Implementation of `CondInst <https://arxiv.org/abs/2003.05664>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 mask_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/conditional_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom ..layers import (ConditionalDetrTransformerDecoder,\n                      DetrTransformerEncoder, SinePositionalEncoding)\nfrom .detr import DETR\n\n\n@MODELS.register_module()\nclass ConditionalDETR(DETR):\n    r\"\"\"Implementation of `Conditional DETR for Fast Training Convergence.\n\n    <https://arxiv.org/abs/2108.06152>`_.\n\n    Code is modified from the `official github repo\n    <https://github.com/Atten4Vis/ConditionalDETR>`_.\n    \"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        self.positional_encoding = SinePositionalEncoding(\n            **self.positional_encoding)\n        self.encoder = DetrTransformerEncoder(**self.encoder)\n        self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\n        self.embed_dims = self.encoder.embed_dims\n        # NOTE The embed_dims is typically passed from the inside out.\n        # For example in DETR, The embed_dims is passed as\n        # self_attn -> the first encoder layer -> encoder -> detector.\n        self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n\n        num_feats = self.positional_encoding.num_feats\n        assert num_feats * 2 == self.embed_dims, \\\n            f'embed_dims should be exactly 2 times of num_feats. ' \\\n            f'Found {self.embed_dims} and {num_feats}.'\n\n    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,\n                        memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional queries of decoder inputs,\n                has shape (bs, num_queries, dim).\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            memory_pos (Tensor): The positional embeddings of memory, has\n                shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` and `references` of the decoder output.\n\n            - hidden_states (Tensor): Has shape\n                (num_decoder_layers, bs, num_queries, dim)\n            - references (Tensor): Has shape\n                (bs, num_queries, 2)\n        \"\"\"\n\n        hidden_states, references = self.decoder(\n            query=query,\n            key=memory,\n            query_pos=query_pos,\n            key_pos=memory_pos,\n            key_padding_mask=memory_mask)\n        head_inputs_dict = dict(\n            hidden_states=hidden_states, references=references)\n        return head_inputs_dict\n"
  },
  {
    "path": "mmdet/models/detectors/cornernet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass CornerNet(SingleStageDetector):\n    \"\"\"CornerNet.\n\n    This detector is the implementation of the paper `CornerNet: Detecting\n    Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/crosskd_atss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Union\n\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import (InstanceList, OptInstanceList, OptConfigType, reduce_mean)\nfrom ..utils import multi_apply, unpack_gt_instances\nfrom .crosskd_single_stage import CrossKDSingleStageDetector\n\n\n@MODELS.register_module()\nclass CrossKDATSS(CrossKDSingleStageDetector):\n\n    def __init__(self, \n                 kd_cfg: OptConfigType = None,\n                 **kwargs) -> None:\n        super().__init__(kd_cfg=kd_cfg,**kwargs)\n        self.loss_center_kd = None\n        if kd_cfg.get('loss_center_kd', None):\n            self.loss_center_kd = MODELS.build(kd_cfg['loss_center_kd'])\n                \n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        tea_x = self.teacher.extract_feat(batch_inputs)\n        tea_cls_scores, tea_bbox_preds, tea_centernesses, tea_cls_hold, tea_reg_hold = \\\n            multi_apply(self.forward_hkd_single, \n                        tea_x,\n                        self.teacher.bbox_head.scales, \n                        module=self.teacher)\n            \n        stu_x = self.extract_feat(batch_inputs)\n        stu_cls_scores, stu_bbox_preds, stu_centernesses, stu_cls_hold, stu_reg_hold = \\\n            multi_apply(self.forward_hkd_single, \n                        stu_x,\n                        self.bbox_head.scales, \n                        module=self)\n            \n        reused_cls_scores, reused_bbox_preds, reused_centernesses = multi_apply(\n            self.reuse_teacher_head, \n            tea_cls_hold, \n            tea_reg_hold, \n            stu_cls_hold,\n            stu_reg_hold, \n            self.teacher.bbox_head.scales)\n\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n        losses = self.loss_by_feat(tea_cls_scores, \n                                   tea_bbox_preds,\n                                   tea_centernesses,\n                                   tea_x,\n                                   stu_cls_scores,\n                                   stu_bbox_preds,\n                                   stu_centernesses,\n                                   stu_x,\n                                   reused_cls_scores,\n                                   reused_bbox_preds,\n                                   reused_centernesses,\n                                   batch_gt_instances,\n                                   batch_img_metas, \n                                   batch_gt_instances_ignore)\n        return losses\n    \n    def forward_hkd_single(self, x, scale, module):\n        cls_feat, reg_feat = x, x\n        cls_feat_hold, reg_feat_hold = x, x\n        for i, cls_conv in 
enumerate(module.bbox_head.cls_convs):\n            cls_feat = cls_conv(cls_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                cls_feat_hold = cls_feat\n            cls_feat = cls_conv.activate(cls_feat)\n        for i, reg_conv in enumerate(module.bbox_head.reg_convs):\n            reg_feat = reg_conv(reg_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                reg_feat_hold = reg_feat\n            reg_feat = reg_conv.activate(reg_feat)\n        cls_score = module.bbox_head.atss_cls(cls_feat)\n        bbox_pred = scale(module.bbox_head.atss_reg(reg_feat)).float()\n        centerness = module.bbox_head.atss_centerness(reg_feat)\n        return cls_score, bbox_pred, centerness, cls_feat_hold, reg_feat_hold\n    \n    def reuse_teacher_head(self, tea_cls_feat, tea_reg_feat, stu_cls_feat,\n                           stu_reg_feat, scale):\n        reused_cls_feat = self.align_scale(stu_cls_feat, tea_cls_feat)\n        reused_reg_feat = self.align_scale(stu_reg_feat, tea_reg_feat)\n        if self.reused_teacher_head_idx != 0:\n            reused_cls_feat = F.relu(reused_cls_feat)\n            reused_reg_feat = F.relu(reused_reg_feat)\n\n        module = self.teacher.bbox_head\n        for i in range(self.reused_teacher_head_idx, module.stacked_convs):\n            reused_cls_feat = module.cls_convs[i](reused_cls_feat)\n            reused_reg_feat = module.reg_convs[i](reused_reg_feat)\n        reused_cls_score = module.atss_cls(reused_cls_feat)\n        reused_bbox_pred = scale(module.atss_reg(reused_reg_feat)).float()\n        reused_centerness = module.atss_centerness(reused_reg_feat)\n        return reused_cls_score, reused_bbox_pred, reused_centerness\n    \n    def align_scale(self, stu_feat, tea_feat):\n        N, C, H, W = stu_feat.size()\n        # normalize student feature\n        stu_feat = stu_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        stu_mean = stu_feat.mean(dim=-1, keepdim=True)\n        stu_std = stu_feat.std(dim=-1, keepdim=True)\n        stu_feat = (stu_feat - stu_mean) / (stu_std + 1e-6)\n        #\n        tea_feat = tea_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        tea_mean = tea_feat.mean(dim=-1, keepdim=True)\n        tea_std = tea_feat.std(dim=-1, keepdim=True)\n        stu_feat = stu_feat * tea_std + tea_mean\n        return stu_feat.reshape(C, N, H, W).permute(1, 0, 2, 3)\n    \n    def loss_by_feat(\n            self,\n            tea_cls_scores: List[Tensor],\n            tea_bbox_preds: List[Tensor],\n            tea_centernesses: List[Tensor],\n            tea_feats: List[Tensor],\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            centernesses: List[Tensor],\n            feats: List[Tensor],\n            reused_cls_scores: List[Tensor],\n            reused_bbox_preds: List[Tensor],\n            reused_centernesses: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            
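tea_cls_scores (list[Tensor]): Classification scores predicted by the\n                teacher head for each scale level.\n            tea_bbox_preds (list[Tensor]): Box predictions from the teacher\n                head for each scale level.\n            reused_cls_scores (list[Tensor]): Classification scores obtained\n                by passing the scale-aligned student features through the\n                teacher head.\n            reused_bbox_preds (list[Tensor]): Box predictions obtained by\n                passing the scale-aligned student features through the\n                teacher head.\n            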
batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.bbox_head.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.bbox_head.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.bbox_head.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        losses_cls, losses_bbox, loss_centerness, \\\n            bbox_avg_factor = multi_apply(\n                self.bbox_head.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                centernesses,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                avg_factor=avg_factor)\n\n        bbox_avg_factor = sum(bbox_avg_factor)\n        bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness)\n\n        losses_cls_kd, losses_reg_kd, losses_center_kd = multi_apply(\n            self.pred_imitation_loss_single,\n            labels_list,\n            anchor_list,\n            tea_cls_scores,\n            tea_bbox_preds,\n            tea_centernesses,\n            reused_cls_scores,\n            reused_bbox_preds,\n            reused_centernesses,\n            label_weights_list,\n            avg_factor=avg_factor)\n        losses.update(dict(loss_cls_kd=losses_cls_kd, loss_reg_kd=losses_reg_kd, losses_center_kd=losses_center_kd))\n        \n        if self.with_feat_distill:\n            losses_feat_kd = [\n                self.loss_feat_kd(feat, tea_feat)\n                for feat, tea_feat in zip(feats, tea_feats)\n            ]\n            losses.update(loss_feat_kd=losses_feat_kd)\n        return losses\n    \n    \n    def pred_imitation_loss_single(self, \n                                   labels,\n                                   anchors,\n                                   tea_cls_score, \n                                   tea_bbox_pred,\n                                   tea_centernesses,\n                                   reused_cls_score, \n                                   reused_bbox_pred,\n                                   reused_centernesses,\n                                   label_weights, \n             
                      avg_factor):\n        # classification branch distillation\n        tea_cls_score = tea_cls_score.permute(0, 2, 3, 1).reshape(-1, self.bbox_head.cls_out_channels)\n        reused_cls_score = reused_cls_score.permute(0, 2, 3, 1).reshape(-1, self.bbox_head.cls_out_channels)\n        label_weights = label_weights.reshape(-1)\n        loss_cls_kd = self.loss_cls_kd(\n            reused_cls_score,\n            tea_cls_score,\n            label_weights,\n            avg_factor=avg_factor)\n\n        # regression branch distillation\n        bbox_coder = self.bbox_head.bbox_coder\n        tea_bbox_pred = tea_bbox_pred.permute(0, 2, 3, 1).reshape(-1, bbox_coder.encode_size)\n        reused_bbox_pred = reused_bbox_pred.permute(0, 2, 3, 1).reshape(-1, bbox_coder.encode_size)\n        anchors = anchors.reshape(-1, anchors.size(-1))\n        tea_bbox_pred = bbox_coder.decode(anchors, tea_bbox_pred)\n        reused_bbox_pred = bbox_coder.decode(anchors, reused_bbox_pred)\n        \n        reg_weights = tea_cls_score.max(dim=1)[0].sigmoid()\n        reg_weights[label_weights == 0] = 0\n\n        loss_reg_kd = self.loss_reg_kd(\n            reused_bbox_pred,\n            tea_bbox_pred,\n            weight=reg_weights,\n            avg_factor=avg_factor)\n        \n        # centernesses branch distillation\n        labels = labels.reshape(-1)\n        bg_class_ind = self.bbox_head.num_classes\n        pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)\n        tea_centernesses = tea_centernesses.permute(0, 2, 3, 1).reshape(-1)\n        reused_centernesses = reused_centernesses.permute(0, 2, 3, 1).reshape(-1)\n\n        if len(pos_inds) > 0:\n            loss_center_kd = self.loss_center_kd(\n                reused_centernesses[pos_inds],\n                tea_centernesses[pos_inds].sigmoid(),\n                avg_factor=avg_factor)\n        else:\n            loss_center_kd = reused_centernesses.new_tensor(0.)\n        return loss_cls_kd, loss_reg_kd, loss_center_kd"
  },
  {
    "path": "mmdet/models/detectors/crosskd_fcos.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Union\n\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import (InstanceList, OptInstanceList, reduce_mean)\nfrom ..utils import multi_apply, unpack_gt_instances\nfrom .crosskd_single_stage import CrossKDSingleStageDetector\n\nINF = 1e8\n\n@MODELS.register_module()\nclass CrossKDFCOS(CrossKDSingleStageDetector):\n    \n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        tea_x = self.teacher.extract_feat(batch_inputs)\n        tea_cls_scores, tea_bbox_preds, tea_centernesses, tea_cls_hold, tea_reg_hold = \\\n            multi_apply(self.forward_hkd_single, \n                        tea_x, \n                        self.teacher.bbox_head.scales,\n                        self.teacher.bbox_head.strides, \n                        module=self.teacher)\n        stu_x = self.extract_feat(batch_inputs)\n        stu_cls_scores, stu_bbox_preds,stu_centernesses, stu_cls_hold, stu_reg_hold = \\\n            multi_apply(self.forward_hkd_single, \n                        stu_x,\n                        self.bbox_head.scales,\n                        self.bbox_head.strides,  \n                        module=self)\n        reused_cls_scores, reused_bbox_preds, reused_centernesses = multi_apply(\n            self.reuse_teacher_head, \n            tea_cls_hold, \n            tea_reg_hold, \n            stu_cls_hold,\n            stu_reg_hold,\n            self.teacher.bbox_head.scales,\n            self.teacher.bbox_head.strides)\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n        losses = self.loss_by_feat(tea_cls_scores, \n                                   tea_bbox_preds,\n                                   tea_centernesses,\n                                   tea_x,\n                                   stu_cls_scores,\n                                   stu_bbox_preds,\n                                   stu_centernesses,\n                                   stu_x,\n                                   reused_cls_scores,\n                                   reused_bbox_preds,\n                                   reused_centernesses,\n                                   batch_gt_instances,\n                                   batch_img_metas, \n                                   batch_gt_instances_ignore)\n        return losses\n    \n    def forward_hkd_single(self, x, scale, stride, module):\n        cls_feat, reg_feat = x, x\n        cls_feat_hold, reg_feat_hold = x, x\n        for i, cls_conv in enumerate(module.bbox_head.cls_convs):\n            cls_feat = cls_conv(cls_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                cls_feat_hold = cls_feat\n            
cls_feat = cls_conv.activate(cls_feat)\n        for i, reg_conv in enumerate(module.bbox_head.reg_convs):\n            reg_feat = reg_conv(reg_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                reg_feat_hold = reg_feat\n            reg_feat = reg_conv.activate(reg_feat)\n        cls_score = module.bbox_head.conv_cls(cls_feat)\n        bbox_pred = scale(module.bbox_head.conv_reg(reg_feat)).float()\n        if module.bbox_head.centerness_on_reg:\n            centerness = module.bbox_head.conv_centerness(reg_feat)\n        else:\n            centerness = module.bbox_head.conv_centerness(cls_feat)\n        if module.bbox_head.norm_on_bbox:\n            # bbox_pred needed for gradient computation has been modified\n            # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n            bbox_pred = bbox_pred.clamp(min=0)\n            if not module.bbox_head.training:\n                bbox_pred *= stride\n        else:\n            bbox_pred = bbox_pred.exp()\n        return cls_score, bbox_pred, centerness, cls_feat_hold, reg_feat_hold\n    \n    def reuse_teacher_head(self, tea_cls_feat, tea_reg_feat, stu_cls_feat, stu_reg_feat, scale, stride):\n        reused_cls_feat = self.align_scale(stu_cls_feat, tea_cls_feat)\n        reused_reg_feat = self.align_scale(stu_reg_feat, tea_reg_feat)\n        if self.reused_teacher_head_idx != 0:\n            reused_cls_feat = F.relu(reused_cls_feat)\n            reused_reg_feat = F.relu(reused_reg_feat)\n\n        module = self.teacher.bbox_head\n        for i in range(self.reused_teacher_head_idx, module.stacked_convs):\n            reused_cls_feat = module.cls_convs[i](reused_cls_feat)\n            reused_reg_feat = module.reg_convs[i](reused_reg_feat)\n        reused_cls_score = module.conv_cls(reused_cls_feat)\n        reused_bbox_pred = scale(module.conv_reg(reused_reg_feat)).float()\n        if module.centerness_on_reg:\n            reused_centerness = module.conv_centerness(reused_reg_feat)\n        else:\n            reused_centerness = module.conv_centerness(reused_cls_feat)\n        if module.norm_on_bbox:\n            # bbox_pred needed for gradient computation has been modified\n            # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace\n            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n            reused_bbox_pred = reused_bbox_pred.clamp(min=0)\n            if not module.training:\n                reused_bbox_pred *= stride\n        else:\n            reused_bbox_pred = reused_bbox_pred.exp()\n        return reused_cls_score, reused_bbox_pred, reused_centerness\n    \n    def align_scale(self, stu_feat, tea_feat):\n        N, C, H, W = stu_feat.size()\n        # normalize student feature\n        stu_feat = stu_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        stu_mean = stu_feat.mean(dim=-1, keepdim=True)\n        stu_std = stu_feat.std(dim=-1, keepdim=True)\n        stu_feat = (stu_feat - stu_mean) / (stu_std + 1e-6)\n        #\n        tea_feat = tea_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        tea_mean = tea_feat.mean(dim=-1, keepdim=True)\n        tea_std = tea_feat.std(dim=-1, keepdim=True)\n        stu_feat = stu_feat * tea_std + tea_mean\n        return stu_feat.reshape(C, N, H, W).permute(1, 0, 2, 3)\n    \n    def loss_by_feat(\n            self,\n            tea_cls_scores: List[Tensor],\n            tea_bbox_preds: List[Tensor],\n            tea_centernesses: List[Tensor],\n            tea_feats: List[Tensor],\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            centernesses: List[Tensor],\n            feats: List[Tensor],\n            reused_cls_scores: List[Tensor],\n            reused_bbox_preds: List[Tensor],\n            reused_centernesses: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.bbox_head.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        labels, bbox_targets = self.bbox_head.get_targets(all_level_points,\n                                                batch_gt_instances)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and centerness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.bbox_head.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_centerness = [\n            centerness.permute(0, 2, 3, 1).reshape(-1)\n            for centerness in centernesses\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_centerness = torch.cat(flatten_centerness)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.bbox_head.num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)\n        num_pos = torch.tensor(\n            len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)\n        num_pos = max(reduce_mean(num_pos), 1.0)\n        loss_cls = self.bbox_head.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_centerness = flatten_centerness[pos_inds]\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_centerness_targets = self.bbox_head.centerness_target(pos_bbox_targets)\n        # centerness weighted iou loss\n        pos_centerness_denorm = max(\n            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)\n\n        if len(pos_inds) > 0:\n            pos_points = flatten_points[pos_inds]\n            pos_decoded_bbox_preds = self.bbox_head.bbox_coder.decode(\n                pos_points, pos_bbox_preds)\n            pos_decoded_target_preds = self.bbox_head.bbox_coder.decode(\n                pos_points, pos_bbox_targets)\n            loss_bbox = self.bbox_head.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds,\n                weight=pos_centerness_targets,\n                avg_factor=pos_centerness_denorm)\n            loss_centerness = self.bbox_head.loss_centerness(\n                pos_centerness, pos_centerness_targets, avg_factor=num_pos)\n        else:\n            loss_bbox = pos_bbox_preds.sum()\n            loss_centerness = pos_centerness.sum()\n        \n        # flatten tea_cls_scores, tea_bbox_preds and tea_centernesses\n        flatten_tea_cls_scores = [\n  
          tea_cls_scores.permute(0, 2, 3, 1).reshape(-1, self.bbox_head.cls_out_channels)\n            for tea_cls_scores in tea_cls_scores\n        ]\n        flatten_tea_bbox_preds = [\n            tea_bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for tea_bbox_pred in tea_bbox_preds\n        ]\n        flatten_tea_centernesses = [\n            tea_centerness.permute(0, 2, 3, 1).reshape(-1, 1)\n            for tea_centerness in tea_centernesses\n        ]\n        flatten_tea_cls_scores = torch.cat(flatten_tea_cls_scores)\n        flatten_tea_bbox_preds = torch.cat(flatten_tea_bbox_preds)\n        flatten_tea_centernesses = torch.cat(flatten_tea_centernesses)\n        \n        # flatten reused_cls_scores, reused_bbox_preds and reused_centernesses\n        flatten_reused_cls_scores = [\n            reused_cls_score.permute(0, 2, 3, 1).reshape(-1, self.bbox_head.cls_out_channels)\n            for reused_cls_score in reused_cls_scores\n        ]\n        flatten_reused_bbox_preds = [\n            reused_bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for reused_bbox_pred in reused_bbox_preds\n        ]\n        flatten_reused_centernesses = [\n            reused_centerness.permute(0, 2, 3, 1).reshape(-1, 1)\n            for reused_centerness in reused_centernesses\n        ]\n        flatten_reused_cls_scores = torch.cat(flatten_reused_cls_scores)\n        flatten_reused_bbox_preds = torch.cat(flatten_reused_bbox_preds)\n        flatten_reused_centernesses = torch.cat(flatten_reused_centernesses)\n        \n        losses_cls_kd = self.loss_cls_kd(flatten_reused_cls_scores, \n                                         flatten_tea_cls_scores, \n                                         avg_factor=pos_centerness_denorm)\n        \n        flatten_tea_bbox_preds = self.bbox_head.bbox_coder.decode(\n                flatten_points, flatten_tea_bbox_preds)\n        flatten_reused_bbox_preds = self.bbox_head.bbox_coder.decode(\n                flatten_points, flatten_reused_bbox_preds)\n        \n        reg_weights = flatten_tea_cls_scores.max(dim=1)[0].sigmoid()\n\n        losses_reg_kd = self.loss_reg_kd(flatten_reused_bbox_preds,\n                                        flatten_tea_bbox_preds,\n                                        weight=reg_weights, \n                                        avg_factor=pos_centerness_denorm)\n        losses = dict(loss_cls=loss_cls,\n                      loss_bbox=loss_bbox,\n                      loss_centerness=loss_centerness,\n                      loss_cls_kd=losses_cls_kd,\n                      loss_reg_kd=losses_reg_kd)\n\n        if self.with_feat_distill:\n            losses_feat_kd = [\n                self.loss_feat_kd(feat, tea_feat)\n                for feat, tea_feat in zip(feats, tea_feats)\n            ]\n            for i, loss in enumerate(losses_feat_kd):\n                losses.update({\"loss_feat_kd_{}\".format(i):loss})\n        return losses"
  },
  {
    "path": "mmdet/models/detectors/crosskd_gfl.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList, OptInstanceList, reduce_mean\n\nfrom ..utils import multi_apply, unpack_gt_instances\nfrom .crosskd_single_stage import CrossKDSingleStageDetector\n\n\n@MODELS.register_module()\nclass CrossKDGFL(CrossKDSingleStageDetector):\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        tea_x = self.teacher.extract_feat(batch_inputs)\n        tea_cls_scores, tea_bbox_preds, tea_cls_hold, tea_reg_hold = \\\n            multi_apply(self.forward_crosskd_single, tea_x,\n                        self.teacher.bbox_head.scales, module=self.teacher)\n        stu_x = self.extract_feat(batch_inputs)\n        stu_cls_scores, stu_bbox_preds, stu_cls_hold, stu_reg_hold = \\\n            multi_apply(self.forward_crosskd_single, stu_x,\n                        self.bbox_head.scales, module=self)\n        reused_cls_scores, reused_bbox_preds = multi_apply(\n            self.reuse_teacher_head, tea_cls_hold, tea_reg_hold, stu_cls_hold,\n            stu_reg_hold, self.teacher.bbox_head.scales)\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n        losses = self.loss_by_feat(tea_cls_scores, tea_bbox_preds, tea_x,\n                                   stu_cls_scores, stu_bbox_preds, stu_x,\n                                   reused_cls_scores, reused_bbox_preds,\n                                   batch_gt_instances, batch_img_metas,\n                                   batch_gt_instances_ignore)\n        return losses\n\n    def forward_crosskd_single(self, x, scale, module):\n        cls_feat, reg_feat = x, x\n        cls_feat_hold, reg_feat_hold = x, x\n        for i, cls_conv in enumerate(module.bbox_head.cls_convs):\n            cls_feat = cls_conv(cls_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                cls_feat_hold = cls_feat\n            cls_feat = cls_conv.activate(cls_feat)\n        for i, reg_conv in enumerate(module.bbox_head.reg_convs):\n            reg_feat = reg_conv(reg_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                reg_feat_hold = reg_feat\n            reg_feat = reg_conv.activate(reg_feat)\n        cls_score = module.bbox_head.gfl_cls(cls_feat)\n        bbox_pred = scale(module.bbox_head.gfl_reg(reg_feat)).float()\n        return cls_score, bbox_pred, cls_feat_hold, reg_feat_hold\n\n    def reuse_teacher_head(self, tea_cls_feat, tea_reg_feat, stu_cls_feat,\n                           stu_reg_feat, scale):\n        reused_cls_feat = self.align_scale(stu_cls_feat, tea_cls_feat)\n        reused_reg_feat = self.align_scale(stu_reg_feat, 
tea_reg_feat)\n        if self.reused_teacher_head_idx != 0:\n            reused_cls_feat = F.relu(reused_cls_feat)\n            reused_reg_feat = F.relu(reused_reg_feat)\n\n        module = self.teacher.bbox_head\n        for i in range(self.reused_teacher_head_idx, module.stacked_convs):\n            reused_cls_feat = module.cls_convs[i](reused_cls_feat)\n            reused_reg_feat = module.reg_convs[i](reused_reg_feat)\n        reused_cls_score = module.gfl_cls(reused_cls_feat)\n        reused_bbox_pred = scale(module.gfl_reg(reused_reg_feat)).float()\n        return reused_cls_score, reused_bbox_pred\n\n    def align_scale(self, stu_feat, tea_feat):\n        N, C, H, W = stu_feat.size()\n        # normalize student feature\n        stu_feat = stu_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        stu_mean = stu_feat.mean(dim=-1, keepdim=True)\n        stu_std = stu_feat.std(dim=-1, keepdim=True)\n        stu_feat = (stu_feat - stu_mean) / (stu_std + 1e-6)\n        #\n        tea_feat = tea_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        tea_mean = tea_feat.mean(dim=-1, keepdim=True)\n        tea_std = tea_feat.std(dim=-1, keepdim=True)\n        stu_feat = stu_feat * tea_std + tea_mean\n        return stu_feat.reshape(C, N, H, W).permute(1, 0, 2, 3)\n\n    def loss_by_feat(\n            self,\n            tea_cls_scores: List[Tensor],\n            tea_bbox_preds: List[Tensor],\n            tea_feats: List[Tensor],\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            feats: List[Tensor],\n            reused_cls_scores: List[Tensor],\n            reused_bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.bbox_head.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.bbox_head.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n\n        cls_reg_targets = self.bbox_head.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, avg_factor) = cls_reg_targets\n\n        avg_factor = reduce_mean(\n            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n        losses_cls, losses_bbox, losses_dfl,\\\n            new_avg_factor = multi_apply(\n                self.bbox_head.loss_by_feat_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                self.bbox_head.prior_generator.strides,\n                avg_factor=avg_factor)\n\n        new_avg_factor = sum(new_avg_factor)\n        new_avg_factor = reduce_mean(new_avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / new_avg_factor, losses_bbox))\n        losses_dfl = list(map(lambda x: x / new_avg_factor, losses_dfl))\n        losses = dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)\n\n        losses_cls_kd, losses_reg_kd, kd_avg_factor = multi_apply(\n            self.pred_mimicking_loss_single,\n            tea_cls_scores,\n            tea_bbox_preds,\n            reused_cls_scores,\n            reused_bbox_preds,\n            label_weights_list,\n            avg_factor=avg_factor)\n        kd_avg_factor = sum(kd_avg_factor)\n        losses_reg_kd = list(map(lambda x: x / kd_avg_factor, losses_reg_kd))\n        losses.update(\n            dict(loss_cls_kd=losses_cls_kd, loss_reg_kd=losses_reg_kd))\n\n        if self.with_feat_distill:\n            losses_feat_kd = [\n                self.loss_feat_kd(feat, tea_feat)\n                for feat, tea_feat in zip(feats, tea_feats)\n            ]\n            losses.update(loss_feat_kd=losses_feat_kd)\n        return losses\n\n    def pred_mimicking_loss_single(self, tea_cls_score, tea_bbox_pred,\n                                   reused_cls_score, reused_bbox_pred,\n                                   label_weights, avg_factor):\n        # classification branch distillation\n        tea_cls_score = tea_cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.bbox_head.cls_out_channels)\n        reused_cls_score = reused_cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.bbox_head.cls_out_channels)\n        label_weights = label_weights.reshape(-1)\n        loss_cls_kd = self.loss_cls_kd(\n            reused_cls_score,\n            tea_cls_score,\n            label_weights,\n            avg_factor=avg_factor)\n\n        # regression branch distillation\n        reg_max = self.bbox_head.reg_max\n        tea_bbox_pred = tea_bbox_pred.permute(0, 2, 3,\n                                
              1).reshape(-1, reg_max + 1)\n        reused_bbox_pred = reused_bbox_pred.permute(0, 2, 3, 1).reshape(\n            -1, reg_max + 1)\n        reg_weights = tea_cls_score.max(dim=1)[0].sigmoid()\n        reg_weights[label_weights == 0] = 0\n        loss_reg_kd = self.loss_reg_kd(\n            reused_bbox_pred,\n            tea_bbox_pred,\n            weight=reg_weights[:, None].expand(-1, 4).reshape(-1),\n            avg_factor=4.0)\n\n        return loss_cls_kd, loss_reg_kd, reg_weights.sum()"
  },
  {
    "path": "mmdet/models/detectors/crosskd_retinanet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Union\n\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import cat_boxes\nfrom mmdet.utils import InstanceList, OptInstanceList\n\nfrom ..utils import images_to_levels, multi_apply, unpack_gt_instances\nfrom .crosskd_single_stage import CrossKDSingleStageDetector\n\n\n@MODELS.register_module()\nclass CrossKDRetinaNet(CrossKDSingleStageDetector):\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        tea_x = self.teacher.extract_feat(batch_inputs)\n        tea_cls_scores, tea_bbox_preds, tea_cls_hold, tea_reg_hold = \\\n            multi_apply(self.forward_crosskd_single, tea_x, module=self.teacher)\n        stu_x = self.extract_feat(batch_inputs)\n        stu_cls_scores, stu_bbox_preds, stu_cls_hold, stu_reg_hold = \\\n            multi_apply(self.forward_crosskd_single, stu_x, module=self)\n        reused_cls_scores, reused_bbox_preds = multi_apply(\n            self.reuse_teacher_head, tea_cls_hold, tea_reg_hold, stu_cls_hold,\n            stu_reg_hold)\n\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n        losses = self.loss_by_feat(tea_cls_scores, tea_bbox_preds, tea_x,\n                                   stu_cls_scores, stu_bbox_preds, stu_x,\n                                   reused_cls_scores, reused_bbox_preds,\n                                   batch_gt_instances, batch_img_metas,\n                                   batch_gt_instances_ignore)\n        return losses\n\n    def forward_crosskd_single(self, x, module):\n        cls_feat, reg_feat = x, x\n        cls_feat_hold, reg_feat_hold = x, x\n        for i, cls_conv in enumerate(module.bbox_head.cls_convs):\n            cls_feat = cls_conv(cls_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                cls_feat_hold = cls_feat\n            cls_feat = cls_conv.activate(cls_feat)\n        for i, reg_conv in enumerate(module.bbox_head.reg_convs):\n            reg_feat = reg_conv(reg_feat, activate=False)\n            if i + 1 == self.reused_teacher_head_idx:\n                reg_feat_hold = reg_feat\n            reg_feat = reg_conv.activate(reg_feat)\n        cls_score = module.bbox_head.retina_cls(cls_feat)\n        bbox_pred = module.bbox_head.retina_reg(reg_feat)\n        return cls_score, bbox_pred, cls_feat_hold, reg_feat_hold\n\n    def reuse_teacher_head(self, tea_cls_feat, tea_reg_feat, stu_cls_feat,\n                           stu_reg_feat):\n        reused_cls_feat = self.align_scale(stu_cls_feat, tea_cls_feat)\n        reused_reg_feat = self.align_scale(stu_reg_feat, tea_reg_feat)\n        if self.reused_teacher_head_idx != 0:\n            reused_cls_feat = F.relu(reused_cls_feat)\n      
      reused_reg_feat = F.relu(reused_reg_feat)\n\n        module = self.teacher.bbox_head\n        for i in range(self.reused_teacher_head_idx, module.stacked_convs):\n            reused_cls_feat = module.cls_convs[i](reused_cls_feat)\n            reused_reg_feat = module.reg_convs[i](reused_reg_feat)\n        reused_cls_score = module.retina_cls(reused_cls_feat)\n        reused_bbox_pred = module.retina_reg(reused_reg_feat)\n        return reused_cls_score, reused_bbox_pred\n\n    def align_scale(self, stu_feat, tea_feat):\n        N, C, H, W = stu_feat.size()\n        # normalize student feature\n        stu_feat = stu_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        stu_mean = stu_feat.mean(dim=-1, keepdim=True)\n        stu_std = stu_feat.std(dim=-1, keepdim=True)\n        stu_feat = (stu_feat - stu_mean) / (stu_std + 1e-6)\n        #\n        tea_feat = tea_feat.permute(1, 0, 2, 3).reshape(C, -1)\n        tea_mean = tea_feat.mean(dim=-1, keepdim=True)\n        tea_std = tea_feat.std(dim=-1, keepdim=True)\n        stu_feat = stu_feat * tea_std + tea_mean\n        return stu_feat.reshape(C, N, H, W).permute(1, 0, 2, 3)\n\n    def loss_by_feat(\n            self,\n            tea_cls_scores: List[Tensor],\n            tea_bbox_preds: List[Tensor],\n            tea_feats: List[Tensor],\n            cls_scores: List[Tensor],\n            bbox_preds: List[Tensor],\n            feats: List[Tensor],\n            reused_cls_scores: List[Tensor],\n            reused_bbox_preds: List[Tensor],\n            batch_gt_instances: InstanceList,\n            batch_img_metas: List[dict],\n            batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the detection\n        head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                has shape (N, num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            batch_img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n                Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == \\\n            self.bbox_head.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.bbox_head.get_anchors(\n            featmap_sizes, batch_img_metas, device=device)\n        cls_reg_targets = self.bbox_head.get_targets(\n            anchor_list,\n            valid_flag_list,\n            batch_gt_instances,\n            batch_img_metas,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         avg_factor) = cls_reg_targets\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(cat_boxes(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        losses_cls, losses_bbox = multi_apply(\n            self.bbox_head.loss_by_feat_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            avg_factor=avg_factor)\n        losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n        losses_cls_kd, losses_reg_kd = multi_apply(\n            self.pred_mimicking_loss_single,\n            tea_cls_scores,\n            tea_bbox_preds,\n            reused_cls_scores,\n            reused_bbox_preds,\n            all_anchor_list,\n            label_weights_list,\n            avg_factor=avg_factor)\n        losses.update(\n            dict(loss_cls_kd=losses_cls_kd, loss_reg_kd=losses_reg_kd))\n\n        if self.with_feat_distill:\n            losses_feat_kd = [\n                self.loss_feat_kd(feat, tea_feat)\n                for feat, tea_feat in zip(feats, tea_feats)\n            ]\n            losses.update(loss_feat_kd=losses_feat_kd)\n        return losses\n\n    def pred_mimicking_loss_single(self, tea_cls_score, tea_bbox_pred,\n                                   reused_cls_score, reused_bbox_pred, anchors,\n                                   label_weights, avg_factor):\n        # classification branch distillation\n        tea_cls_score = tea_cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.bbox_head.cls_out_channels)\n        reused_cls_score = reused_cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.bbox_head.cls_out_channels)\n        label_weights = label_weights.reshape(-1)\n        loss_cls_kd = self.loss_cls_kd(\n            reused_cls_score,\n            tea_cls_score,\n            label_weights,\n            avg_factor=avg_factor)\n        # regression branch distillation\n        bbox_coder = self.bbox_head.bbox_coder\n        tea_bbox_pred = tea_bbox_pred.permute(0, 2, 3, 1).reshape(\n            -1, bbox_coder.encode_size)\n        reused_bbox_pred = reused_bbox_pred.permute(0, 2, 3, 1).reshape(\n            -1, bbox_coder.encode_size)\n        anchors = anchors.reshape(-1, 
anchors.size(-1))\n        tea_bbox_pred = bbox_coder.decode(anchors, tea_bbox_pred)\n        reused_bbox_pred = bbox_coder.decode(anchors, reused_bbox_pred)\n        reg_weights = tea_cls_score.max(dim=1)[0].sigmoid()\n        reg_weights[label_weights == 0] = 0\n        loss_reg_kd = self.loss_reg_kd(\n            reused_bbox_pred,\n            tea_bbox_pred,\n            reg_weights,\n            avg_factor=avg_factor)\n        return loss_cls_kd, loss_reg_kd"
  },
  {
    "path": "mmdet/models/detectors/crosskd_single_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.config import Config\nfrom mmengine.runner import load_checkpoint\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import cat_boxes\nfrom mmdet.utils import (ConfigType, InstanceList, OptConfigType,\n                         OptInstanceList, reduce_mean)\nfrom ..utils import images_to_levels, multi_apply, unpack_gt_instances\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass CrossKDSingleStageDetector(SingleStageDetector):\n    r\"\"\"Implementation of `Distilling the Knowledge in a Neural Network.\n    <https://arxiv.org/abs/1503.02531>`_.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file\n            path or the config object of teacher model.\n        teacher_ckpt (str, optional): Checkpoint path of teacher model.\n            If left as None, the model will not load any weights.\n            Defaults to True.\n        eval_teacher (bool): Set the train mode for teacher.\n            Defaults to True.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of ATSS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of ATSS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(\n        self,\n        backbone: ConfigType,\n        neck: ConfigType,\n        bbox_head: ConfigType,\n        teacher_config: Union[ConfigType, str, Path],\n        teacher_ckpt: Optional[str] = None,\n        kd_cfg: OptConfigType = None,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        data_preprocessor: OptConfigType = None,\n    ) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor)\n        # Build teacher model\n        if isinstance(teacher_config, (str, Path)):\n            teacher_config = Config.fromfile(teacher_config)\n        self.teacher = MODELS.build(teacher_config['model'])\n        if teacher_ckpt is not None:\n            load_checkpoint(self.teacher, teacher_ckpt, map_location='cpu')\n        # In order to reforward teacher model,\n        # set requires_grad of teacher model to False\n        self.freeze(self.teacher)\n        self.loss_cls_kd = MODELS.build(kd_cfg['loss_cls_kd'])\n        self.loss_reg_kd = MODELS.build(kd_cfg['loss_reg_kd'])\n        self.with_feat_distill = False\n        if kd_cfg.get('loss_feat_kd', None):\n            self.loss_feat_kd = MODELS.build(kd_cfg['loss_feat_kd'])\n            self.with_feat_distill = True\n        self.reused_teacher_head_idx = kd_cfg['reused_teacher_head_idx']\n\n    @staticmethod\n    def freeze(model: nn.Module):\n        \"\"\"Freeze the model.\"\"\"\n        model.eval()\n        
for param in model.parameters():\n            param.requires_grad = False\n\n    def cuda(self, device: Optional[str] = None) -> nn.Module:\n        \"\"\"Since the teacher is registered as a plain object, it is necessary\n        to move the teacher model to CUDA when calling the ``cuda``\n        function.\"\"\"\n        self.teacher.cuda(device=device)\n        return super().cuda(device=device)\n\n    def to(self, device: Optional[str] = None) -> nn.Module:\n        \"\"\"Since the teacher is registered as a plain object, it is necessary\n        to move the teacher model to the target device when calling the\n        ``to`` function.\"\"\"\n        self.teacher.to(device=device)\n        return super().to(device=device)\n\n    def train(self, mode: bool = True) -> None:\n        \"\"\"Keep the teacher in eval mode while setting the train mode of the\n        student.\"\"\"\n        self.teacher.train(False)\n        super().train(mode)\n\n    def __setattr__(self, name: str, value: Any) -> None:\n        \"\"\"Set attribute, i.e. ``self.name = value``.\n\n        This override prevents the teacher model from being registered as an\n        nn.Module. The teacher module is registered as a plain object, so that\n        the teacher parameters do not show up when calling the\n        ``self.parameters``, ``self.modules`` or ``self.children`` methods.\n        \"\"\"\n        if name == 'teacher':\n            object.__setattr__(self, name, value)\n        else:\n            super().__setattr__(name, value)\n"
  },
  {
    "path": "mmdet/models/detectors/crowddet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass CrowdDet(TwoStageDetector):\n    \"\"\"Implementation of `CrowdDet <https://arxiv.org/abs/2003.09163>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        rpn_head (:obj:`ConfigDict` or dict): The rpn config.\n        roi_head (:obj:`ConfigDict` or dict): The roi config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of FCOS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of FCOS. Defaults to None.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            data_preprocessor=data_preprocessor)\n"
  },
  {
    "path": "mmdet/models/detectors/d2_wrapper.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Union\n\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import BaseBoxes\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\nfrom mmdet.utils import ConfigType\nfrom .base import BaseDetector\n\ntry:\n    import detectron2\n    from detectron2.config import get_cfg\n    from detectron2.modeling import build_model\n    from detectron2.structures.masks import BitMasks as D2_BitMasks\n    from detectron2.structures.masks import PolygonMasks as D2_PolygonMasks\n    from detectron2.utils.events import EventStorage\nexcept ImportError:\n    detectron2 = None\n\n\ndef _to_cfgnode_list(cfg: ConfigType,\n                     config_list: list = [],\n                     father_name: str = 'MODEL') -> tuple:\n    \"\"\"Convert the key and value of mmengine.ConfigDict into a list.\n\n    Args:\n        cfg (ConfigDict): The detectron2 model config.\n        config_list (list): A list contains the key and value of ConfigDict.\n            Defaults to [].\n        father_name (str): The father name add before the key.\n            Defaults to \"MODEL\".\n\n    Returns:\n        tuple:\n\n        - config_list: A list contains the key and value of ConfigDict.\n        - father_name (str): The father name add before the key.\n          Defaults to \"MODEL\".\n    \"\"\"\n    for key, value in cfg.items():\n        name = f'{father_name}.{key.upper()}'\n        if isinstance(value, ConfigDict) or isinstance(value, dict):\n            config_list, fater_name = \\\n                _to_cfgnode_list(value, config_list, name)\n        else:\n            config_list.append(name)\n            config_list.append(value)\n\n    return config_list, father_name\n\n\ndef convert_d2_pred_to_datasample(data_samples: SampleList,\n                                  d2_results_list: list) -> SampleList:\n    \"\"\"Convert the Detectron2's result to DetDataSample.\n\n    Args:\n        data_samples (list[:obj:`DetDataSample`]): The batch\n            data samples. It usually includes information such\n            as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n        d2_results_list (list): The list of the results of Detectron2's model.\n\n    Returns:\n        list[:obj:`DetDataSample`]: Detection results of the\n        input images. Each DetDataSample usually contain\n        'pred_instances'. 
And the ``pred_instances`` usually\n        contains following keys.\n\n        - scores (Tensor): Classification scores, has a shape\n          (num_instance, )\n        - labels (Tensor): Labels of bboxes, has a shape\n          (num_instances, ).\n        - bboxes (Tensor): Has a shape (num_instances, 4),\n          the last dimension 4 arrange as (x1, y1, x2, y2).\n    \"\"\"\n    assert len(data_samples) == len(d2_results_list)\n    for data_sample, d2_results in zip(data_samples, d2_results_list):\n        d2_instance = d2_results['instances']\n\n        results = InstanceData()\n        results.bboxes = d2_instance.pred_boxes.tensor\n        results.scores = d2_instance.scores\n        results.labels = d2_instance.pred_classes\n\n        if d2_instance.has('pred_masks'):\n            results.masks = d2_instance.pred_masks\n        data_sample.pred_instances = results\n\n    return data_samples\n\n\n@MODELS.register_module()\nclass Detectron2Wrapper(BaseDetector):\n    \"\"\"Wrapper of a Detectron2 model. Input/output formats of this class follow\n    MMDetection's convention, so a Detectron2 model can be trained and\n    evaluated in MMDetection.\n\n    Args:\n        detector (:obj:`ConfigDict` or dict): The module config of\n            Detectron2.\n        bgr_to_rgb (bool): whether to convert image from BGR to RGB.\n            Defaults to False.\n        rgb_to_bgr (bool): whether to convert image from RGB to BGR.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 detector: ConfigType,\n                 bgr_to_rgb: bool = False,\n                 rgb_to_bgr: bool = False) -> None:\n        if detectron2 is None:\n            raise ImportError('Please install Detectron2 first')\n        assert not (bgr_to_rgb and rgb_to_bgr), (\n            '`bgr2rgb` and `rgb2bgr` cannot be set to True at the same time')\n        super().__init__()\n        self._channel_conversion = rgb_to_bgr or bgr_to_rgb\n        cfgnode_list, _ = _to_cfgnode_list(detector)\n        self.cfg = get_cfg()\n        self.cfg.merge_from_list(cfgnode_list)\n        self.d2_model = build_model(self.cfg)\n        self.storage = EventStorage()\n\n    def init_weights(self) -> None:\n        \"\"\"Initialization Backbone.\n\n        NOTE: The initialization of other layers are in Detectron2,\n        if users want to change the initialization way, please\n        change the code in Detectron2.\n        \"\"\"\n        from detectron2.checkpoint import DetectionCheckpointer\n        checkpointer = DetectionCheckpointer(model=self.d2_model)\n        checkpointer.load(self.cfg.MODEL.WEIGHTS, checkpointables=[])\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, tuple]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        The inputs will first convert to the Detectron2 type and feed into\n        D2 models.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        d2_batched_inputs = self._convert_to_d2_inputs(\n            batch_inputs=batch_inputs,\n            batch_data_samples=batch_data_samples,\n            training=True)\n\n        with self.storage as storage:  # noqa\n            losses = self.d2_model(d2_batched_inputs)\n        # storage contains some training information, such as cls_accuracy.\n        # you can use storage.latest() to get the detail information\n        return losses\n\n    def predict(self, batch_inputs: Tensor,\n                batch_data_samples: SampleList) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        The inputs will first convert to the Detectron2 type and feed into\n        D2 models. And the results will convert back to the MMDet type.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances'. And the ``pred_instances`` usually\n            contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        d2_batched_inputs = self._convert_to_d2_inputs(\n            batch_inputs=batch_inputs,\n            batch_data_samples=batch_data_samples,\n            training=False)\n        # results in detectron2 has already rescale\n        d2_results_list = self.d2_model(d2_batched_inputs)\n        batch_data_samples = convert_d2_pred_to_datasample(\n            data_samples=batch_data_samples, d2_results_list=d2_results_list)\n\n        return batch_data_samples\n\n    def _forward(self, *args, **kwargs):\n        \"\"\"Network forward process.\n\n        Usually includes backbone, neck and head forward without any post-\n        processing.\n        \"\"\"\n        raise NotImplementedError(\n            f'`_forward` is not implemented in {self.__class__.__name__}')\n\n    def extract_feat(self, *args, **kwargs):\n        \"\"\"Extract features from images.\n\n        `extract_feat` will not be used in obj:``Detectron2Wrapper``.\n        \"\"\"\n        pass\n\n    def _convert_to_d2_inputs(self,\n                              batch_inputs: Tensor,\n                              batch_data_samples: SampleList,\n                              training=True) -> list:\n        \"\"\"Convert inputs type to support Detectron2's model.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n            training (bool): Whether to enable training time processing.\n\n        Returns:\n            list[dict]: A list of dict, which will be fed into Detectron2's\n            model. And the dict usually contains following keys.\n\n            - image (Tensor): Image in (C, H, W) format.\n            - instances (Instances): GT Instance.\n            - height (int): the output height resolution of the model\n            - width (int): the output width resolution of the model\n        \"\"\"\n        from detectron2.data.detection_utils import filter_empty_instances\n        from detectron2.structures import Boxes, Instances\n\n        batched_d2_inputs = []\n        for image, data_samples in zip(batch_inputs, batch_data_samples):\n            d2_inputs = dict()\n            # deal with metainfo\n            meta_info = data_samples.metainfo\n            d2_inputs['file_name'] = meta_info['img_path']\n            d2_inputs['height'], d2_inputs['width'] = meta_info['ori_shape']\n            d2_inputs['image_id'] = meta_info['img_id']\n            # deal with image\n            if self._channel_conversion:\n                image = image[[2, 1, 0], ...]\n            d2_inputs['image'] = image\n            # deal with gt_instances\n            gt_instances = data_samples.gt_instances\n            d2_instances = Instances(meta_info['img_shape'])\n\n            gt_boxes = gt_instances.bboxes\n            # TODO: use mmdet.structures.box.get_box_tensor after PR 8658\n            #  has merged\n            if isinstance(gt_boxes, BaseBoxes):\n                gt_boxes = gt_boxes.tensor\n            d2_instances.gt_boxes = Boxes(gt_boxes)\n\n            d2_instances.gt_classes = gt_instances.labels\n            if gt_instances.get('masks', None) is not None:\n                gt_masks = gt_instances.masks\n                if isinstance(gt_masks, PolygonMasks):\n                    d2_instances.gt_masks = D2_PolygonMasks(gt_masks.masks)\n                elif isinstance(gt_masks, BitmapMasks):\n                    d2_instances.gt_masks = D2_BitMasks(gt_masks.masks)\n                else:\n                    raise TypeError('The type of `gt_mask` can be '\n                                    '`PolygonMasks` or `BitMasks`, but get '\n                                    f'{type(gt_masks)}.')\n            # convert to cpu and convert back to cuda to avoid\n            # some potential error\n            if training:\n                device = gt_boxes.device\n                d2_instances = filter_empty_instances(\n                    d2_instances.to('cpu')).to(device)\n                d2_inputs['instances'] = d2_instances\n            batched_d2_inputs.append(d2_inputs)\n\n        return batched_d2_inputs\n"
  },
  {
    "path": "mmdet/models/detectors/dab_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, Tuple\n\nfrom mmengine.model import uniform_init\nfrom torch import Tensor, nn\n\nfrom mmdet.registry import MODELS\nfrom ..layers import SinePositionalEncoding\nfrom ..layers.transformer import (DABDetrTransformerDecoder,\n                                  DABDetrTransformerEncoder, inverse_sigmoid)\nfrom .detr import DETR\n\n\n@MODELS.register_module()\nclass DABDETR(DETR):\n    r\"\"\"Implementation of `DAB-DETR:\n    Dynamic Anchor Boxes are Better Queries for DETR.\n\n    <https://arxiv.org/abs/2201.12329>`_.\n\n    Code is modified from the `official github repo\n    <https://github.com/IDEA-Research/DAB-DETR>`_.\n\n    Args:\n        with_random_refpoints (bool): Whether to randomly initialize query\n            embeddings and not update them during training.\n            Defaults to False.\n        num_patterns (int): Inspired by Anchor-DETR. Defaults to 0.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 with_random_refpoints: bool = False,\n                 num_patterns: int = 0,\n                 **kwargs) -> None:\n        self.with_random_refpoints = with_random_refpoints\n        assert isinstance(num_patterns, int), \\\n            f'num_patterns should be int but {num_patterns}.'\n        self.num_patterns = num_patterns\n\n        super().__init__(*args, **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        self.positional_encoding = SinePositionalEncoding(\n            **self.positional_encoding)\n        self.encoder = DABDetrTransformerEncoder(**self.encoder)\n        self.decoder = DABDetrTransformerDecoder(**self.decoder)\n        self.embed_dims = self.encoder.embed_dims\n        self.query_dim = self.decoder.query_dim\n        self.query_embedding = nn.Embedding(self.num_queries, self.query_dim)\n        if self.num_patterns > 0:\n            self.patterns = nn.Embedding(self.num_patterns, self.embed_dims)\n\n        num_feats = self.positional_encoding.num_feats\n        assert num_feats * 2 == self.embed_dims, \\\n            f'embed_dims should be exactly 2 times of num_feats. 
' \\\n            f'Found {self.embed_dims} and {num_feats}.'\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights for Transformer and other components.\"\"\"\n        super(DABDETR, self).init_weights()\n        if self.with_random_refpoints:\n            uniform_init(self.query_embedding)\n            self.query_embedding.weight.data[:, :2] = \\\n                inverse_sigmoid(self.query_embedding.weight.data[:, :2])\n            self.query_embedding.weight.data[:, :2].requires_grad = False\n\n    def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:\n        \"\"\"Prepare intermediate variables before entering Transformer decoder,\n        such as `query`, `query_pos`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n\n        Returns:\n            tuple[dict, dict]: The first dict contains the inputs of the\n            decoder and the second dict contains the inputs of the bbox_head\n            function.\n\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n                `self.forward_decoder()`, which includes 'query', 'query_pos',\n                'memory' and 'reg_branches'.\n            - head_inputs_dict (dict): The keyword args dictionary of the\n                bbox_head functions, which is usually empty, or includes\n                `enc_outputs_class` and `enc_outputs_coord` when the detector\n                supports 'two stage' or 'query selection' strategies.\n        \"\"\"\n        batch_size = memory.size(0)\n        query_pos = self.query_embedding.weight\n        query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)\n        if self.num_patterns == 0:\n            query = query_pos.new_zeros(batch_size, self.num_queries,\n                                        self.embed_dims)\n        else:\n            query = self.patterns.weight[:, None, None, :]\\\n                .repeat(1, self.num_queries, batch_size, 1)\\\n                .view(-1, batch_size, self.embed_dims)\\\n                .permute(1, 0, 2)\n            query_pos = query_pos.repeat(1, self.num_patterns, 1)\n\n        decoder_inputs_dict = dict(\n            query_pos=query_pos, query=query, memory=memory)\n        head_inputs_dict = dict()\n        return decoder_inputs_dict, head_inputs_dict\n\n    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,\n                        memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional queries of decoder inputs,\n                has shape (bs, num_queries, dim).\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            memory_pos (Tensor): The positional embeddings of memory, has\n                shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` and `references` of the decoder output.\n        \"\"\"\n\n        hidden_states, references = self.decoder(\n            query=query,\n            key=memory,\n            query_pos=query_pos,\n            
key_pos=memory_pos,\n            key_padding_mask=memory_mask,\n            reg_branches=self.bbox_head.\n            fc_reg  # iterative refinement for anchor boxes\n        )\n        head_inputs_dict = dict(\n            hidden_states=hidden_states, references=references)\n        return head_inputs_dict\n"
  },
  {
    "path": "mmdet/models/detectors/ddod.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass DDOD(SingleStageDetector):\n    \"\"\"Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of ATSS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of ATSS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/deformable_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Dict, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention\nfrom mmengine.model import xavier_init\nfrom torch import Tensor, nn\nfrom torch.nn.init import normal_\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList\nfrom mmdet.utils import OptConfigType\nfrom ..layers import (DeformableDetrTransformerDecoder,\n                      DeformableDetrTransformerEncoder, SinePositionalEncoding)\nfrom .base_detr import DetectionTransformer\n\n\n@MODELS.register_module()\nclass DeformableDETR(DetectionTransformer):\n    r\"\"\"Implementation of `Deformable DETR: Deformable Transformers for\n    End-to-End Object Detection <https://arxiv.org/abs/2010.04159>`_\n\n    Code is modified from the `official github repo\n    <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n    Args:\n        decoder (:obj:`ConfigDict` or dict, optional): Config of the\n            Transformer decoder. Defaults to None.\n        bbox_head (:obj:`ConfigDict` or dict, optional): Config for the\n            bounding box head module. Defaults to None.\n        with_box_refine (bool, optional): Whether to refine the references\n            in the decoder. Defaults to `False`.\n        as_two_stage (bool, optional): Whether to generate the proposal\n            from the outputs of encoder. Defaults to `False`.\n        num_feature_levels (int, optional): Number of feature levels.\n            Defaults to 4.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 decoder: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 with_box_refine: bool = False,\n                 as_two_stage: bool = False,\n                 num_feature_levels: int = 4,\n                 **kwargs) -> None:\n        self.with_box_refine = with_box_refine\n        self.as_two_stage = as_two_stage\n        self.num_feature_levels = num_feature_levels\n\n        if bbox_head is not None:\n            assert 'share_pred_layer' not in bbox_head and \\\n                   'num_pred_layer' not in bbox_head and \\\n                   'as_two_stage' not in bbox_head, \\\n                'The two keyword args `share_pred_layer`, `num_pred_layer`, ' \\\n                'and `as_two_stage are set in `detector.__init__()`, users ' \\\n                'should not set them in `bbox_head` config.'\n            # The last prediction layer is used to generate proposal\n            # from encode feature map when `as_two_stage` is `True`.\n            # And all the prediction layers should share parameters\n            # when `with_box_refine` is `True`.\n            bbox_head['share_pred_layer'] = not with_box_refine\n            bbox_head['num_pred_layer'] = (decoder['num_layers'] + 1) \\\n                if self.as_two_stage else decoder['num_layers']\n            bbox_head['as_two_stage'] = as_two_stage\n\n        super().__init__(*args, decoder=decoder, bbox_head=bbox_head, **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        self.positional_encoding = SinePositionalEncoding(\n            **self.positional_encoding)\n        self.encoder = DeformableDetrTransformerEncoder(**self.encoder)\n        self.decoder = DeformableDetrTransformerDecoder(**self.decoder)\n        self.embed_dims = self.encoder.embed_dims\n       
 if not self.as_two_stage:\n            self.query_embedding = nn.Embedding(self.num_queries,\n                                                self.embed_dims * 2)\n            # NOTE The query_embedding will be split into query and query_pos\n            # in self.pre_decoder, hence, the embed_dims are doubled.\n\n        num_feats = self.positional_encoding.num_feats\n        assert num_feats * 2 == self.embed_dims, \\\n            'embed_dims should be exactly 2 times of num_feats. ' \\\n            f'Found {self.embed_dims} and {num_feats}.'\n\n        self.level_embed = nn.Parameter(\n            torch.Tensor(self.num_feature_levels, self.embed_dims))\n\n        if self.as_two_stage:\n            self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)\n            self.memory_trans_norm = nn.LayerNorm(self.embed_dims)\n            self.pos_trans_fc = nn.Linear(self.embed_dims * 2,\n                                          self.embed_dims * 2)\n            self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2)\n        else:\n            self.reference_points_fc = nn.Linear(self.embed_dims, 2)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights for Transformer and other components.\"\"\"\n        super().init_weights()\n        for coder in self.encoder, self.decoder:\n            for p in coder.parameters():\n                if p.dim() > 1:\n                    nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MultiScaleDeformableAttention):\n                m.init_weights()\n        if self.as_two_stage:\n            nn.init.xavier_uniform_(self.memory_trans_fc.weight)\n            nn.init.xavier_uniform_(self.pos_trans_fc.weight)\n        else:\n            xavier_init(\n                self.reference_points_fc, distribution='uniform', bias=0.)\n        normal_(self.level_embed)\n\n    def pre_transformer(\n            self,\n            mlvl_feats: Tuple[Tensor],\n            batch_data_samples: OptSampleList = None) -> Tuple[Dict]:\n        \"\"\"Process image features before feeding them to the transformer.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            mlvl_feats (tuple[Tensor]): Multi-level features that may have\n                different resolutions, output from neck. Each feature has\n                shape (bs, dim, h_lvl, w_lvl), where 'lvl' means 'layer'.\n            batch_data_samples (list[:obj:`DetDataSample`], optional): The\n                batch data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            tuple[dict]: The first dict contains the inputs of encoder and the\n            second dict contains the inputs of decoder.\n\n            - encoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_encoder()`, which includes 'feat', 'feat_mask',\n              and 'feat_pos'.\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_decoder()`, which includes 'memory_mask'.\n        \"\"\"\n        batch_size = mlvl_feats[0].size(0)\n\n        # construct binary masks for the transformer.\n        assert batch_data_samples is not None\n        batch_input_shape = batch_data_samples[0].batch_input_shape\n        img_shape_list = [sample.img_shape for sample in batch_data_samples]\n        input_img_h, input_img_w = batch_input_shape\n        masks = mlvl_feats[0].new_ones((batch_size, input_img_h, input_img_w))\n        for img_id in range(batch_size):\n            img_h, img_w = img_shape_list[img_id]\n            masks[img_id, :img_h, :img_w] = 0\n        # NOTE following the official DETR repo, non-zero values representing\n        # ignored positions, while zero values means valid positions.\n\n        mlvl_masks = []\n        mlvl_pos_embeds = []\n        for feat in mlvl_feats:\n            mlvl_masks.append(\n                F.interpolate(masks[None],\n                              size=feat.shape[-2:]).to(torch.bool).squeeze(0))\n            mlvl_pos_embeds.append(self.positional_encoding(mlvl_masks[-1]))\n\n        feat_flatten = []\n        lvl_pos_embed_flatten = []\n        mask_flatten = []\n        spatial_shapes = []\n        for lvl, (feat, mask, pos_embed) in enumerate(\n                zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):\n            batch_size, c, h, w = feat.shape\n            # [bs, c, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl, c]\n            feat = feat.view(batch_size, c, -1).permute(0, 2, 1)\n            pos_embed = pos_embed.view(batch_size, c, -1).permute(0, 2, 1)\n            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)\n            # [bs, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl]\n            mask = mask.flatten(1)\n            spatial_shape = (h, w)\n\n            feat_flatten.append(feat)\n            lvl_pos_embed_flatten.append(lvl_pos_embed)\n            mask_flatten.append(mask)\n            spatial_shapes.append(spatial_shape)\n\n        # (bs, num_feat_points, dim)\n        feat_flatten = torch.cat(feat_flatten, 1)\n        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n        # (bs, num_feat_points), where num_feat_points = sum_lvl(h_lvl*w_lvl)\n        mask_flatten = torch.cat(mask_flatten, 1)\n\n        spatial_shapes = torch.as_tensor(  # (num_level, 2)\n            spatial_shapes,\n            dtype=torch.long,\n            device=feat_flatten.device)\n        level_start_index = torch.cat((\n            spatial_shapes.new_zeros((1, )),  # (num_level)\n            spatial_shapes.prod(1).cumsum(0)[:-1]))\n        valid_ratios = torch.stack(  # (bs, num_level, 2)\n            [self.get_valid_ratio(m) for m in mlvl_masks], 1)\n\n        encoder_inputs_dict = dict(\n            feat=feat_flatten,\n            feat_mask=mask_flatten,\n            feat_pos=lvl_pos_embed_flatten,\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n     
       valid_ratios=valid_ratios)\n        decoder_inputs_dict = dict(\n            memory_mask=mask_flatten,\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios)\n        return encoder_inputs_dict, decoder_inputs_dict\n\n    def forward_encoder(self, feat: Tensor, feat_mask: Tensor,\n                        feat_pos: Tensor, spatial_shapes: Tensor,\n                        level_start_index: Tensor,\n                        valid_ratios: Tensor) -> Dict:\n        \"\"\"Forward with Transformer encoder.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            feat (Tensor): Sequential features, has shape (bs, num_feat_points,\n                dim).\n            feat_mask (Tensor): ByteTensor, the padding mask of the features,\n                has shape (bs, num_feat_points).\n            feat_pos (Tensor): The positional embeddings of the features, has\n                shape (bs, num_feat_points, dim).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n\n        Returns:\n            dict: The dictionary of encoder outputs, which includes the\n            `memory` of the encoder output.\n        \"\"\"\n        memory = self.encoder(\n            query=feat,\n            query_pos=feat_pos,\n            key_padding_mask=feat_mask,  # for self_attn\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios)\n        encoder_outputs_dict = dict(\n            memory=memory,\n            memory_mask=feat_mask,\n            spatial_shapes=spatial_shapes)\n        return encoder_outputs_dict\n\n    def pre_decoder(self, memory: Tensor, memory_mask: Tensor,\n                    spatial_shapes: Tensor) -> Tuple[Dict, Dict]:\n        \"\"\"Prepare intermediate variables before entering Transformer decoder,\n        such as `query`, `query_pos`, and `reference_points`.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points). 
It will only be used when\n                `as_two_stage` is `True`.\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n                It will only be used when `as_two_stage` is `True`.\n\n        Returns:\n            tuple[dict, dict]: The decoder_inputs_dict and head_inputs_dict.\n\n            - decoder_inputs_dict (dict): The keyword dictionary args of\n              `self.forward_decoder()`, which includes 'query', 'query_pos',\n              'memory', and `reference_points`. The reference_points of\n              decoder input here are 4D boxes when `as_two_stage` is `True`,\n              otherwise 2D points, although it has `points` in its name.\n              The reference_points in encoder is always 2D points.\n            - head_inputs_dict (dict): The keyword dictionary args of the\n              bbox_head functions, which includes `enc_outputs_class` and\n              `enc_outputs_coord`. They are both `None` when 'as_two_stage'\n              is `False`. The dict is empty when `self.training` is `False`.\n        \"\"\"\n        batch_size, _, c = memory.shape\n        if self.as_two_stage:\n            output_memory, output_proposals = \\\n                self.gen_encoder_output_proposals(\n                    memory, memory_mask, spatial_shapes)\n            enc_outputs_class = self.bbox_head.cls_branches[\n                self.decoder.num_layers](\n                    output_memory)\n            enc_outputs_coord_unact = self.bbox_head.reg_branches[\n                self.decoder.num_layers](output_memory) + output_proposals\n            enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n            # We only use the first channel in enc_outputs_class as foreground,\n            # the other (num_classes - 1) channels are actually not used.\n            # Its targets are set to be 0s, which indicates the first\n            # class (foreground) because we use [0, num_classes - 1] to\n            # indicate class labels, background class is indicated by\n            # num_classes (similar convention in RPN).\n            # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa\n            # This follows the official implementation of Deformable DETR.\n            topk_proposals = torch.topk(\n                enc_outputs_class[..., 0], self.num_queries, dim=1)[1]\n            topk_coords_unact = torch.gather(\n                enc_outputs_coord_unact, 1,\n                topk_proposals.unsqueeze(-1).repeat(1, 1, 4))\n            topk_coords_unact = topk_coords_unact.detach()\n            reference_points = topk_coords_unact.sigmoid()\n            pos_trans_out = self.pos_trans_fc(\n                self.get_proposal_pos_embed(topk_coords_unact))\n            pos_trans_out = self.pos_trans_norm(pos_trans_out)\n            query_pos, query = torch.split(pos_trans_out, c, dim=2)\n        else:\n            enc_outputs_class, enc_outputs_coord = None, None\n            query_embed = self.query_embedding.weight\n            query_pos, query = torch.split(query_embed, c, dim=1)\n            query_pos = query_pos.unsqueeze(0).expand(batch_size, -1, -1)\n            query = query.unsqueeze(0).expand(batch_size, -1, -1)\n            reference_points = self.reference_points_fc(query_pos).sigmoid()\n\n        decoder_inputs_dict = dict(\n            query=query,\n            query_pos=query_pos,\n            
memory=memory,\n            reference_points=reference_points)\n        head_inputs_dict = dict(\n            enc_outputs_class=enc_outputs_class,\n            enc_outputs_coord=enc_outputs_coord) if self.training else dict()\n        return decoder_inputs_dict, head_inputs_dict\n\n    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,\n                        memory_mask: Tensor, reference_points: Tensor,\n                        spatial_shapes: Tensor, level_start_index: Tensor,\n                        valid_ratios: Tensor) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional queries of decoder inputs,\n                has shape (bs, num_queries, dim).\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            reference_points (Tensor): The initial reference, has shape\n                (bs, num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has\n                shape (bs, num_queries, 2) with the last dimension arranged as\n                (cx, cy).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` of the decoder output and `references` including\n            the initial and intermediate reference_points.\n        \"\"\"\n        inter_states, inter_references = self.decoder(\n            query=query,\n            value=memory,\n            query_pos=query_pos,\n            key_padding_mask=memory_mask,  # for cross_attn\n            reference_points=reference_points,\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios,\n            reg_branches=self.bbox_head.reg_branches\n            if self.with_box_refine else None)\n        references = [reference_points, *inter_references]\n        decoder_outputs_dict = dict(\n            hidden_states=inter_states, references=references)\n        return decoder_outputs_dict\n\n    @staticmethod\n    def get_valid_ratio(mask: Tensor) -> Tensor:\n        \"\"\"Get the valid radios of feature map in a level.\n\n        .. 
code:: text\n\n                    |---> valid_W <---|\n                 ---+-----------------+-----+---\n                  A |                 |     | A\n                  | |                 |     | |\n                  | |                 |     | |\n            valid_H |                 |     | |\n                  | |                 |     | H\n                  | |                 |     | |\n                  V |                 |     | |\n                 ---+-----------------+     | |\n                    |                       | V\n                    +-----------------------+---\n                    |---------> W <---------|\n\n          The valid_ratios are defined as:\n                r_h = valid_H / H,  r_w = valid_W / W\n          They are the factors to re-normalize the relative coordinates of the\n          image to the relative coordinates of the current level feature map.\n\n        Args:\n            mask (Tensor): Binary mask of a feature map, has shape (bs, H, W).\n\n        Returns:\n            Tensor: valid ratios [r_w, r_h] of a feature map, has shape (1, 2).\n        \"\"\"\n        _, H, W = mask.shape\n        valid_H = torch.sum(~mask[:, :, 0], 1)\n        valid_W = torch.sum(~mask[:, 0, :], 1)\n        valid_ratio_h = valid_H.float() / H\n        valid_ratio_w = valid_W.float() / W\n        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n        return valid_ratio\n\n    def gen_encoder_output_proposals(\n            self, memory: Tensor, memory_mask: Tensor,\n            spatial_shapes: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Generate proposals from encoded memory. The function will only be\n        used when `as_two_stage` is `True`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n\n        Returns:\n            tuple: A tuple of transformed memory and proposals.\n\n            - output_memory (Tensor): The transformed memory for obtaining\n              top-k proposals, has shape (bs, num_feat_points, dim).\n            - output_proposals (Tensor): The inverse-normalized proposal, has\n              shape (batch_size, num_keys, 4) with the last dimension arranged\n              as (cx, cy, w, h).\n        \"\"\"\n\n        bs = memory.size(0)\n        proposals = []\n        _cur = 0  # start index in the sequence of the current level\n        for lvl, (H, W) in enumerate(spatial_shapes):\n            mask_flatten_ = memory_mask[:,\n                                        _cur:(_cur + H * W)].view(bs, H, W, 1)\n            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1).unsqueeze(-1)\n            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1).unsqueeze(-1)\n\n            grid_y, grid_x = torch.meshgrid(\n                torch.linspace(\n                    0, H - 1, H, dtype=torch.float32, device=memory.device),\n                torch.linspace(\n                    0, W - 1, W, dtype=torch.float32, device=memory.device))\n            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n            scale = torch.cat([valid_W, valid_H], 1).view(bs, 1, 1, 2)\n            grid = (grid.unsqueeze(0).expand(bs, -1, -1, -1) + 0.5) / 
scale\n            wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n            proposal = torch.cat((grid, wh), -1).view(bs, -1, 4)\n            proposals.append(proposal)\n            _cur += (H * W)\n        output_proposals = torch.cat(proposals, 1)\n        output_proposals_valid = ((output_proposals > 0.01) &\n                                  (output_proposals < 0.99)).all(\n                                      -1, keepdim=True)\n        # inverse_sigmoid\n        output_proposals = torch.log(output_proposals / (1 - output_proposals))\n        output_proposals = output_proposals.masked_fill(\n            memory_mask.unsqueeze(-1), float('inf'))\n        output_proposals = output_proposals.masked_fill(\n            ~output_proposals_valid, float('inf'))\n\n        output_memory = memory\n        output_memory = output_memory.masked_fill(\n            memory_mask.unsqueeze(-1), float(0))\n        output_memory = output_memory.masked_fill(~output_proposals_valid,\n                                                  float(0))\n        output_memory = self.memory_trans_fc(output_memory)\n        output_memory = self.memory_trans_norm(output_memory)\n        # [bs, sum(hw), 2]\n        return output_memory, output_proposals\n\n    @staticmethod\n    def get_proposal_pos_embed(proposals: Tensor,\n                               num_pos_feats: int = 128,\n                               temperature: int = 10000) -> Tensor:\n        \"\"\"Get the position embedding of the proposal.\n\n        Args:\n            proposals (Tensor): Not normalized proposals, has shape\n                (bs, num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h).\n            num_pos_feats (int, optional): The feature dimension for each\n                position along x, y, w, and h-axis. Note the final returned\n                dimension for each position is 4 times of num_pos_feats.\n                Default to 128.\n            temperature (int, optional): The temperature used for scaling the\n                position embedding. Defaults to 10000.\n\n        Returns:\n            Tensor: The position embedding of proposal, has shape\n            (bs, num_queries, num_pos_feats * 4), with the last dimension\n            arranged as (cx, cy, w, h)\n        \"\"\"\n        scale = 2 * math.pi\n        dim_t = torch.arange(\n            num_pos_feats, dtype=torch.float32, device=proposals.device)\n        dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)\n        # N, L, 4\n        proposals = proposals.sigmoid() * scale\n        # N, L, 4, 128\n        pos = proposals[:, :, :, None] / dim_t\n        # N, L, 4, 64, 2\n        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),\n                          dim=4).flatten(2)\n        return pos\n"
  },
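The `get_valid_ratio` and `gen_encoder_output_proposals` docstrings above describe how padding masks are turned into per-level valid ratios. Below is a minimal, standalone sketch of that valid-ratio computation with made-up shapes (plain PyTorch, not part of the repository):

```python
import torch

# Dummy padding mask: True marks padded pixels, False marks valid ones.
bs, H, W = 2, 8, 10
mask = torch.ones(bs, H, W, dtype=torch.bool)
mask[0, :6, :7] = False   # image 0 occupies a 6x7 region of the padded canvas
mask[1, :8, :10] = False  # image 1 fills the whole canvas

valid_H = torch.sum(~mask[:, :, 0], 1)  # valid height per image
valid_W = torch.sum(~mask[:, 0, :], 1)  # valid width per image
valid_ratio = torch.stack([valid_W.float() / W, valid_H.float() / H], -1)
print(valid_ratio)  # tensor([[0.7000, 0.7500], [1.0000, 1.0000]])
```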
  {
    "path": "mmdet/models/detectors/detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList\nfrom ..layers import (DetrTransformerDecoder, DetrTransformerEncoder,\n                      SinePositionalEncoding)\nfrom .base_detr import DetectionTransformer\n\n\n@MODELS.register_module()\nclass DETR(DetectionTransformer):\n    r\"\"\"Implementation of `DETR: End-to-End Object Detection with Transformers.\n\n    <https://arxiv.org/pdf/2005.12872>`_.\n\n    Code is modified from the `official github repo\n    <https://github.com/facebookresearch/detr>`_.\n    \"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        self.positional_encoding = SinePositionalEncoding(\n            **self.positional_encoding)\n        self.encoder = DetrTransformerEncoder(**self.encoder)\n        self.decoder = DetrTransformerDecoder(**self.decoder)\n        self.embed_dims = self.encoder.embed_dims\n        # NOTE The embed_dims is typically passed from the inside out.\n        # For example in DETR, The embed_dims is passed as\n        # self_attn -> the first encoder layer -> encoder -> detector.\n        self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n\n        num_feats = self.positional_encoding.num_feats\n        assert num_feats * 2 == self.embed_dims, \\\n            'embed_dims should be exactly 2 times of num_feats. ' \\\n            f'Found {self.embed_dims} and {num_feats}.'\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights for Transformer and other components.\"\"\"\n        super().init_weights()\n        for coder in self.encoder, self.decoder:\n            for p in coder.parameters():\n                if p.dim() > 1:\n                    nn.init.xavier_uniform_(p)\n\n    def pre_transformer(\n            self,\n            img_feats: Tuple[Tensor],\n            batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:\n        \"\"\"Prepare the inputs of the Transformer.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            img_feats (Tuple[Tensor]): Tuple of features output from the neck,\n                has shape (bs, c, h, w).\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such as\n                `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            tuple[dict, dict]: The first dict contains the inputs of encoder\n            and the second dict contains the inputs of decoder.\n\n            - encoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_encoder()`, which includes 'feat', 'feat_mask',\n              and 'feat_pos'.\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_decoder()`, which includes 'memory_mask',\n              and 'memory_pos'.\n        \"\"\"\n\n        feat = img_feats[-1]  # NOTE img_feats contains only one feature.\n        batch_size, feat_dim, _, _ = feat.shape\n        # construct binary masks which for the transformer.\n        assert batch_data_samples is not None\n        batch_input_shape = batch_data_samples[0].batch_input_shape\n        img_shape_list = [sample.img_shape for sample in batch_data_samples]\n\n        input_img_h, input_img_w = batch_input_shape\n        masks = feat.new_ones((batch_size, input_img_h, input_img_w))\n        for img_id in range(batch_size):\n            img_h, img_w = img_shape_list[img_id]\n            masks[img_id, :img_h, :img_w] = 0\n        # NOTE following the official DETR repo, non-zero values represent\n        # ignored positions, while zero values mean valid positions.\n\n        masks = F.interpolate(\n            masks.unsqueeze(1), size=feat.shape[-2:]).to(torch.bool).squeeze(1)\n        # [batch_size, embed_dim, h, w]\n        pos_embed = self.positional_encoding(masks)\n\n        # use `view` instead of `flatten` for dynamically exporting to ONNX\n        # [bs, c, h, w] -> [bs, h*w, c]\n        feat = feat.view(batch_size, feat_dim, -1).permute(0, 2, 1)\n        pos_embed = pos_embed.view(batch_size, feat_dim, -1).permute(0, 2, 1)\n        # [bs, h, w] -> [bs, h*w]\n        masks = masks.view(batch_size, -1)\n\n        # prepare transformer_inputs_dict\n        encoder_inputs_dict = dict(\n            feat=feat, feat_mask=masks, feat_pos=pos_embed)\n        decoder_inputs_dict = dict(memory_mask=masks, memory_pos=pos_embed)\n        return encoder_inputs_dict, decoder_inputs_dict\n\n    def forward_encoder(self, feat: Tensor, feat_mask: Tensor,\n                        feat_pos: Tensor) -> Dict:\n        \"\"\"Forward with Transformer encoder.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            feat (Tensor): Sequential features, has shape (bs, num_feat_points,\n                dim).\n            feat_mask (Tensor): ByteTensor, the padding mask of the features,\n                has shape (bs, num_feat_points).\n            feat_pos (Tensor): The positional embeddings of the features, has\n                shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of encoder outputs, which includes the\n            `memory` of the encoder output.\n        \"\"\"\n        memory = self.encoder(\n            query=feat, query_pos=feat_pos,\n            key_padding_mask=feat_mask)  # for self_attn\n        encoder_outputs_dict = dict(memory=memory)\n        return encoder_outputs_dict\n\n    def pre_decoder(self, memory: Tensor) -> 
Tuple[Dict, Dict]:\n        \"\"\"Prepare intermediate variables before entering Transformer decoder,\n        such as `query`, `query_pos`.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n\n        Returns:\n            tuple[dict, dict]: The first dict contains the inputs of decoder\n            and the second dict contains the inputs of the bbox_head function.\n\n            - decoder_inputs_dict (dict): The keyword args dictionary of\n              `self.forward_decoder()`, which includes 'query', 'query_pos',\n              'memory'.\n            - head_inputs_dict (dict): The keyword args dictionary of the\n              bbox_head functions, which is usually empty, or includes\n              `enc_outputs_class` and `enc_outputs_coord` when the detector\n              supports 'two stage' or 'query selection' strategies.\n        \"\"\"\n\n        batch_size = memory.size(0)  # (bs, num_feat_points, dim)\n        query_pos = self.query_embedding.weight\n        # (num_queries, dim) -> (bs, num_queries, dim)\n        query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)\n        query = torch.zeros_like(query_pos)\n\n        decoder_inputs_dict = dict(\n            query_pos=query_pos, query=query, memory=memory)\n        head_inputs_dict = dict()\n        return decoder_inputs_dict, head_inputs_dict\n\n    def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,\n                        memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional queries of decoder inputs,\n                has shape (bs, num_queries, dim).\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            memory_pos (Tensor): The positional embeddings of memory, has\n                shape (bs, num_feat_points, dim).\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` of the decoder output.\n\n            - hidden_states (Tensor): Has shape\n              (num_decoder_layers, bs, num_queries, dim)\n        \"\"\"\n\n        hidden_states = self.decoder(\n            query=query,\n            key=memory,\n            value=memory,\n            query_pos=query_pos,\n            key_pos=memory_pos,\n            key_padding_mask=memory_mask)  # for cross_attn\n\n        head_inputs_dict = dict(hidden_states=hidden_states)\n        return head_inputs_dict\n"
  },
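To make the shape bookkeeping in `DETR.pre_transformer` above easier to follow, here is a condensed sketch with made-up feature and image sizes (standalone PyTorch, not repository code):

```python
import torch
import torch.nn.functional as F

batch_size, dim = 2, 256
feat = torch.randn(batch_size, dim, 25, 38)   # single feature map from the neck
batch_input_shape = (800, 1216)               # padded input size of the batch
img_shapes = [(800, 1216), (600, 900)]        # valid (h, w) of each image

# 0 marks valid positions, non-zero marks padding (official DETR convention).
masks = feat.new_ones((batch_size, *batch_input_shape))
for img_id, (img_h, img_w) in enumerate(img_shapes):
    masks[img_id, :img_h, :img_w] = 0

# Downsample the mask to the feature resolution, then flatten everything.
masks = F.interpolate(
    masks.unsqueeze(1), size=feat.shape[-2:]).to(torch.bool).squeeze(1)
feat = feat.view(batch_size, dim, -1).permute(0, 2, 1)  # (bs, h*w, dim)
masks = masks.view(batch_size, -1)                      # (bs, h*w)
print(feat.shape, masks.shape)
# torch.Size([2, 950, 256]) torch.Size([2, 950])
```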
  {
    "path": "mmdet/models/detectors/dino.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, Optional, Tuple\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn.init import normal_\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList\nfrom mmdet.utils import OptConfigType\nfrom ..layers import (CdnQueryGenerator, DeformableDetrTransformerEncoder,\n                      DinoTransformerDecoder, SinePositionalEncoding)\nfrom .deformable_detr import DeformableDETR, MultiScaleDeformableAttention\n\n\n@MODELS.register_module()\nclass DINO(DeformableDETR):\n    r\"\"\"Implementation of `DINO: DETR with Improved DeNoising Anchor Boxes\n    for End-to-End Object Detection <https://arxiv.org/abs/2203.03605>`_\n\n    Code is modified from the `official github repo\n    <https://github.com/IDEA-Research/DINO>`_.\n\n    Args:\n        dn_cfg (:obj:`ConfigDict` or dict, optional): Config of denoising\n            query generator. Defaults to `None`.\n    \"\"\"\n\n    def __init__(self, *args, dn_cfg: OptConfigType = None, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        assert self.as_two_stage, 'as_two_stage must be True for DINO'\n        assert self.with_box_refine, 'with_box_refine must be True for DINO'\n\n        if dn_cfg is not None:\n            assert 'num_classes' not in dn_cfg and \\\n                   'num_queries' not in dn_cfg and \\\n                   'hidden_dim' not in dn_cfg, \\\n                'The three keyword args `num_classes`, `embed_dims`, and ' \\\n                '`num_matching_queries` are set in `detector.__init__()`, ' \\\n                'users should not set them in `dn_cfg` config.'\n            dn_cfg['num_classes'] = self.bbox_head.num_classes\n            dn_cfg['embed_dims'] = self.embed_dims\n            dn_cfg['num_matching_queries'] = self.num_queries\n        self.dn_query_generator = CdnQueryGenerator(**dn_cfg)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n        self.positional_encoding = SinePositionalEncoding(\n            **self.positional_encoding)\n        self.encoder = DeformableDetrTransformerEncoder(**self.encoder)\n        self.decoder = DinoTransformerDecoder(**self.decoder)\n        self.embed_dims = self.encoder.embed_dims\n        self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n        # NOTE In DINO, the query_embedding only contains content\n        # queries, while in Deformable DETR, the query_embedding\n        # contains both content and spatial queries, and in DETR,\n        # it only contains spatial queries.\n\n        num_feats = self.positional_encoding.num_feats\n        assert num_feats * 2 == self.embed_dims, \\\n            f'embed_dims should be exactly 2 times of num_feats. 
' \\\n            f'Found {self.embed_dims} and {num_feats}.'\n\n        self.level_embed = nn.Parameter(\n            torch.Tensor(self.num_feature_levels, self.embed_dims))\n        self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)\n        self.memory_trans_norm = nn.LayerNorm(self.embed_dims)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights for Transformer and other components.\"\"\"\n        super(DeformableDETR, self).init_weights()\n        for coder in self.encoder, self.decoder:\n            for p in coder.parameters():\n                if p.dim() > 1:\n                    nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MultiScaleDeformableAttention):\n                m.init_weights()\n        nn.init.xavier_uniform_(self.memory_trans_fc.weight)\n        nn.init.xavier_uniform_(self.query_embedding.weight)\n        normal_(self.level_embed)\n\n    def forward_transformer(\n        self,\n        img_feats: Tuple[Tensor],\n        batch_data_samples: OptSampleList = None,\n    ) -> Dict:\n        \"\"\"Forward process of Transformer.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at `TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n        The difference is that the ground truth in `batch_data_samples` is\n        required for the `pre_decoder` to prepare the query of DINO.\n        Additionally, DINO inherits the `pre_transformer` method and the\n        `forward_encoder` method of DeformableDETR. More details about the\n        two methods can be found in `mmdet/detector/deformable_detr.py`.\n\n        Args:\n            img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each\n                feature map has shape (bs, dim, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            dict: The dictionary of bbox_head function inputs, which always\n            includes the `hidden_states` of the decoder output and may contain\n            `references` including the initial and intermediate references.\n        \"\"\"\n        encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(\n            img_feats, batch_data_samples)\n\n        encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)\n\n        tmp_dec_in, head_inputs_dict = self.pre_decoder(\n            **encoder_outputs_dict, batch_data_samples=batch_data_samples)\n        decoder_inputs_dict.update(tmp_dec_in)\n\n        decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)\n        head_inputs_dict.update(decoder_outputs_dict)\n        return head_inputs_dict\n\n    def pre_decoder(\n        self,\n        memory: Tensor,\n        memory_mask: Tensor,\n        spatial_shapes: Tensor,\n        batch_data_samples: OptSampleList = None,\n    ) -> Tuple[Dict]:\n        \"\"\"Prepare intermediate variables before entering Transformer decoder,\n        such as `query`, `query_pos`, and `reference_points`.\n\n        Args:\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points). Will only be used when\n                `as_two_stage` is `True`.\n            spatial_shapes (Tensor): Spatial shapes of features in all levels.\n                With shape (num_levels, 2), last dimension represents (h, w).\n                Will only be used when `as_two_stage` is `True`.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n                Defaults to None.\n\n        Returns:\n            tuple[dict]: The decoder_inputs_dict and head_inputs_dict.\n\n            - decoder_inputs_dict (dict): The keyword dictionary args of\n              `self.forward_decoder()`, which includes 'query', 'memory',\n              `reference_points`, and `dn_mask`. 
The reference points of\n              decoder input here are 4D boxes, although it has `points`\n              in its name.\n            - head_inputs_dict (dict): The keyword dictionary args of the\n              bbox_head functions, which includes `topk_score`, `topk_coords`,\n              and `dn_meta` when `self.training` is `True`, else is empty.\n        \"\"\"\n        bs, _, c = memory.shape\n        cls_out_features = self.bbox_head.cls_branches[\n            self.decoder.num_layers].out_features\n\n        output_memory, output_proposals = self.gen_encoder_output_proposals(\n            memory, memory_mask, spatial_shapes)\n        enc_outputs_class = self.bbox_head.cls_branches[\n            self.decoder.num_layers](\n                output_memory)\n        enc_outputs_coord_unact = self.bbox_head.reg_branches[\n            self.decoder.num_layers](output_memory) + output_proposals\n\n        # NOTE The DINO selects top-k proposals according to scores of\n        # multi-class classification, while DeformDETR, where the input\n        # is `enc_outputs_class[..., 0]` selects according to scores of\n        # binary classification.\n        topk_indices = torch.topk(\n            enc_outputs_class.max(-1)[0], k=self.num_queries, dim=1)[1]\n        topk_score = torch.gather(\n            enc_outputs_class, 1,\n            topk_indices.unsqueeze(-1).repeat(1, 1, cls_out_features))\n        topk_coords_unact = torch.gather(\n            enc_outputs_coord_unact, 1,\n            topk_indices.unsqueeze(-1).repeat(1, 1, 4))\n        topk_coords = topk_coords_unact.sigmoid()\n        topk_coords_unact = topk_coords_unact.detach()\n\n        query = self.query_embedding.weight[:, None, :]\n        query = query.repeat(1, bs, 1).transpose(0, 1)\n        if self.training:\n            dn_label_query, dn_bbox_query, dn_mask, dn_meta = \\\n                self.dn_query_generator(batch_data_samples)\n            query = torch.cat([dn_label_query, query], dim=1)\n            reference_points = torch.cat([dn_bbox_query, topk_coords_unact],\n                                         dim=1)\n        else:\n            reference_points = topk_coords_unact\n            dn_mask, dn_meta = None, None\n        reference_points = reference_points.sigmoid()\n\n        decoder_inputs_dict = dict(\n            query=query,\n            memory=memory,\n            reference_points=reference_points,\n            dn_mask=dn_mask)\n        # NOTE DINO calculates encoder losses on scores and coordinates\n        # of selected top-k encoder queries, while DeformDETR is of all\n        # encoder queries.\n        head_inputs_dict = dict(\n            enc_outputs_class=topk_score,\n            enc_outputs_coord=topk_coords,\n            dn_meta=dn_meta) if self.training else dict()\n        return decoder_inputs_dict, head_inputs_dict\n\n    def forward_decoder(self,\n                        query: Tensor,\n                        memory: Tensor,\n                        memory_mask: Tensor,\n                        reference_points: Tensor,\n                        spatial_shapes: Tensor,\n                        level_start_index: Tensor,\n                        valid_ratios: Tensor,\n                        dn_mask: Optional[Tensor] = None) -> Dict:\n        \"\"\"Forward with Transformer decoder.\n\n        The forward procedure of the transformer is defined as:\n        'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'\n        More details can be found at 
`TransformerDetector.forward_transformer`\n        in `mmdet/detector/base_detr.py`.\n\n        Args:\n            query (Tensor): The queries of decoder inputs, has shape\n                (bs, num_queries_total, dim), where `num_queries_total` is the\n                sum of `num_denoising_queries` and `num_matching_queries` when\n                `self.training` is `True`, else `num_matching_queries`.\n            memory (Tensor): The output embeddings of the Transformer encoder,\n                has shape (bs, num_feat_points, dim).\n            memory_mask (Tensor): ByteTensor, the padding mask of the memory,\n                has shape (bs, num_feat_points).\n            reference_points (Tensor): The initial reference, has shape\n                (bs, num_queries_total, 4) with the last dimension arranged as\n                (cx, cy, w, h).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n            dn_mask (Tensor, optional): The attention mask to prevent\n                information leakage from different denoising groups and\n                matching parts, will be used as `self_attn_mask` of the\n                `self.decoder`, has shape (num_queries_total,\n                num_queries_total).\n                It is `None` when `self.training` is `False`.\n\n        Returns:\n            dict: The dictionary of decoder outputs, which includes the\n            `hidden_states` of the decoder output and `references` including\n            the initial and intermediate reference_points.\n        \"\"\"\n        inter_states, references = self.decoder(\n            query=query,\n            value=memory,\n            key_padding_mask=memory_mask,\n            self_attn_mask=dn_mask,\n            reference_points=reference_points,\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios,\n            reg_branches=self.bbox_head.reg_branches)\n\n        if len(query) == self.num_queries:\n            # NOTE: This is to make sure label_embedding is involved in\n            # producing loss even if there is no denoising query (no ground\n            # truth target in this GPU), otherwise this will raise a runtime\n            # error in distributed training.\n            inter_states[0] += \\\n                self.dn_query_generator.label_embedding.weight[0, 0] * 0.0\n\n        decoder_outputs_dict = dict(\n            hidden_states=inter_states, references=list(references))\n        return decoder_outputs_dict\n"
  },
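`DINO.pre_decoder` above ranks encoder proposals by their best per-class score and gathers the top-k. Here is a standalone sketch of that selection step with dummy tensors (not repository code; the sizes, including `num_queries = 300`, are illustrative only):

```python
import torch

bs, num_points, num_classes, num_queries = 2, 1000, 80, 300
enc_outputs_class = torch.randn(bs, num_points, num_classes)
enc_outputs_coord_unact = torch.randn(bs, num_points, 4)

# Rank proposals by their highest class score, keep the top-k indices.
topk_indices = torch.topk(
    enc_outputs_class.max(-1)[0], k=num_queries, dim=1)[1]
# Gather the scores and (unactivated) box coordinates of the selected proposals.
topk_score = torch.gather(
    enc_outputs_class, 1,
    topk_indices.unsqueeze(-1).repeat(1, 1, num_classes))
topk_coords_unact = torch.gather(
    enc_outputs_coord_unact, 1,
    topk_indices.unsqueeze(-1).repeat(1, 1, 4))
print(topk_score.shape, topk_coords_unact.shape)
# torch.Size([2, 300, 80]) torch.Size([2, 300, 4])
```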
  {
    "path": "mmdet/models/detectors/fast_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass FastRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            data_preprocessor=data_preprocessor)\n"
  },
  {
    "path": "mmdet/models/detectors/faster_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass FasterRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            data_preprocessor=data_preprocessor)\n"
  },
  {
    "path": "mmdet/models/detectors/fcos.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass FCOS(SingleStageDetector):\n    \"\"\"Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of FCOS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of FCOS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/fovea.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass FOVEA(SingleStageDetector):\n    \"\"\"Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of FOVEA. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of FOVEA. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/fsaf.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass FSAF(SingleStageDetector):\n    \"\"\"Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/gfl.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass GFL(SingleStageDetector):\n    \"\"\"Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of GFL. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of GFL. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/grid_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass GridRCNN(TwoStageDetector):\n    \"\"\"Grid R-CNN.\n\n    This detector is the implementation of:\n    - Grid R-CNN (https://arxiv.org/abs/1811.12030)\n    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/htc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom .cascade_rcnn import CascadeRCNN\n\n\n@MODELS.register_module()\nclass HybridTaskCascade(CascadeRCNN):\n    \"\"\"Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_\"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n\n    @property\n    def with_semantic(self) -> bool:\n        \"\"\"bool: whether the detector has a semantic head\"\"\"\n        return self.roi_head.with_semantic\n"
  },
  {
    "path": "mmdet/models/detectors/kd_one_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom pathlib import Path\nfrom typing import Any, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.config import Config\nfrom mmengine.runner import load_checkpoint\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass KnowledgeDistillationSingleStageDetector(SingleStageDetector):\n    r\"\"\"Implementation of `Distilling the Knowledge in a Neural Network.\n    <https://arxiv.org/abs/1503.02531>`_.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file\n            path or the config object of teacher model.\n        teacher_ckpt (str, optional): Checkpoint path of teacher model.\n            If left as None, the model will not load any weights.\n            Defaults to True.\n        eval_teacher (bool): Set the train mode for teacher.\n            Defaults to True.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of ATSS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of ATSS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(\n        self,\n        backbone: ConfigType,\n        neck: ConfigType,\n        bbox_head: ConfigType,\n        teacher_config: Union[ConfigType, str, Path],\n        teacher_ckpt: Optional[str] = None,\n        eval_teacher: bool = True,\n        train_cfg: OptConfigType = None,\n        test_cfg: OptConfigType = None,\n        data_preprocessor: OptConfigType = None,\n    ) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor)\n        self.eval_teacher = eval_teacher\n        # Build teacher model\n        if isinstance(teacher_config, (str, Path)):\n            teacher_config = Config.fromfile(teacher_config)\n        self.teacher_model = MODELS.build(teacher_config['model'])\n        if teacher_ckpt is not None:\n            load_checkpoint(\n                self.teacher_model, teacher_ckpt, map_location='cpu')\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        with torch.no_grad():\n            teacher_x = self.teacher_model.extract_feat(batch_inputs)\n            out_teacher = self.teacher_model.bbox_head(teacher_x)\n        losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)\n        return losses\n\n    def cuda(self, device: Optional[str] = None) -> nn.Module:\n        \"\"\"Since teacher_model is registered as a plain object, it is necessary\n        to put the teacher model to cuda when calling ``cuda`` function.\"\"\"\n        self.teacher_model.cuda(device=device)\n        return super().cuda(device=device)\n\n    def to(self, device: Optional[str] = None) -> nn.Module:\n        \"\"\"Since teacher_model is registered as a plain object, it is necessary\n        to put the teacher model to other device when calling ``to``\n        function.\"\"\"\n        self.teacher_model.to(device=device)\n        return super().to(device=device)\n\n    def train(self, mode: bool = True) -> None:\n        \"\"\"Set the same train mode for teacher and student model.\"\"\"\n        if self.eval_teacher:\n            self.teacher_model.train(False)\n        else:\n            self.teacher_model.train(mode)\n        super().train(mode)\n\n    def __setattr__(self, name: str, value: Any) -> None:\n        \"\"\"Set attribute, i.e. self.name = value\n\n        This reloading prevent the teacher model from being registered as a\n        nn.Module. The teacher module is registered as a plain object, so that\n        the teacher parameters will not show up when calling\n        ``self.parameters``, ``self.modules``, ``self.children`` methods.\n        \"\"\"\n        if name == 'teacher_model':\n            object.__setattr__(self, name, value)\n        else:\n            super().__setattr__(name, value)\n"
  },
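The `__setattr__` override above is what keeps the teacher out of the student's registered modules. A toy, self-contained illustration of the same pattern (the `Student` class is hypothetical, not part of the repository):

```python
import torch.nn as nn


class Student(nn.Module):
    """Stores its teacher as a plain attribute, not as a submodule."""

    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 2)           # registered normally
        self.teacher_model = nn.Linear(4, 2)  # intercepted below

    def __setattr__(self, name, value):
        if name == 'teacher_model':
            # Bypass nn.Module registration so the teacher's parameters do
            # not appear in parameters()/modules()/children().
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)


student = Student()
print(sum(p.numel() for p in student.parameters()))  # 10: only the head
```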
  {
    "path": "mmdet/models/detectors/lad.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.runner import load_checkpoint\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType\nfrom ..utils.misc import unpack_gt_instances\nfrom .kd_one_stage import KnowledgeDistillationSingleStageDetector\n\n\n@MODELS.register_module()\nclass LAD(KnowledgeDistillationSingleStageDetector):\n    \"\"\"Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_.\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 teacher_backbone: ConfigType,\n                 teacher_neck: ConfigType,\n                 teacher_bbox_head: ConfigType,\n                 teacher_ckpt: Optional[str] = None,\n                 eval_teacher: bool = True,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None) -> None:\n        super(KnowledgeDistillationSingleStageDetector, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor)\n        self.eval_teacher = eval_teacher\n        self.teacher_model = nn.Module()\n        self.teacher_model.backbone = MODELS.build(teacher_backbone)\n        if teacher_neck is not None:\n            self.teacher_model.neck = MODELS.build(teacher_neck)\n        teacher_bbox_head.update(train_cfg=train_cfg)\n        teacher_bbox_head.update(test_cfg=test_cfg)\n        self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)\n        if teacher_ckpt is not None:\n            load_checkpoint(\n                self.teacher_model, teacher_ckpt, map_location='cpu')\n\n    @property\n    def with_teacher_neck(self) -> bool:\n        \"\"\"bool: whether the detector has a teacher_neck\"\"\"\n        return hasattr(self.teacher_model, 'neck') and \\\n            self.teacher_model.neck is not None\n\n    def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:\n        \"\"\"Directly extract teacher features from the backbone+neck.\"\"\"\n        x = self.teacher_model.backbone(batch_inputs)\n        if self.with_teacher_neck:\n            x = self.teacher_model.neck(x)\n        return x\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n        # get label assignment from the teacher\n        with torch.no_grad():\n            x_teacher = self.extract_teacher_feat(batch_inputs)\n            outs_teacher = self.teacher_model.bbox_head(x_teacher)\n            label_assignment_results = \\\n                self.teacher_model.bbox_head.get_label_assignment(\n                    *outs_teacher, batch_gt_instances, batch_img_metas,\n                    batch_gt_instances_ignore)\n\n        # the student use the label assignment from the teacher to learn\n        x = self.extract_feat(batch_inputs)\n        losses = self.bbox_head.loss(x, label_assignment_results,\n                                     batch_data_samples)\n        return losses\n"
  },
  {
    "path": "mmdet/models/detectors/mask2former.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .maskformer import MaskFormer\n\n\n@MODELS.register_module()\nclass Mask2Former(MaskFormer):\n    r\"\"\"Implementation of `Masked-attention Mask\n    Transformer for Universal Image Segmentation\n    <https://arxiv.org/pdf/2112.01527>`_.\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 panoptic_head: OptConfigType = None,\n                 panoptic_fusion_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            panoptic_head=panoptic_head,\n            panoptic_fusion_head=panoptic_fusion_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/mask_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass MaskRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigDict,\n                 rpn_head: ConfigDict,\n                 roi_head: ConfigDict,\n                 train_cfg: ConfigDict,\n                 test_cfg: ConfigDict,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            data_preprocessor=data_preprocessor)\n"
  },
  {
    "path": "mmdet/models/detectors/mask_scoring_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass MaskScoringRCNN(TwoStageDetector):\n    \"\"\"Mask Scoring RCNN.\n\n    https://arxiv.org/abs/1903.00241\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/maskformer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Tuple\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass MaskFormer(SingleStageDetector):\n    r\"\"\"Implementation of `Per-Pixel Classification is\n    NOT All You Need for Semantic Segmentation\n    <https://arxiv.org/pdf/2107.06278>`_.\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 panoptic_head: OptConfigType = None,\n                 panoptic_fusion_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super(SingleStageDetector, self).__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.backbone = MODELS.build(backbone)\n        if neck is not None:\n            self.neck = MODELS.build(neck)\n\n        panoptic_head_ = panoptic_head.deepcopy()\n        panoptic_head_.update(train_cfg=train_cfg)\n        panoptic_head_.update(test_cfg=test_cfg)\n        self.panoptic_head = MODELS.build(panoptic_head_)\n\n        panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()\n        panoptic_fusion_head_.update(test_cfg=test_cfg)\n        self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)\n\n        self.num_things_classes = self.panoptic_head.num_things_classes\n        self.num_stuff_classes = self.panoptic_head.num_stuff_classes\n        self.num_classes = self.panoptic_head.num_classes\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Dict[str, Tensor]:\n        \"\"\"\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        losses = self.panoptic_head.loss(x, batch_data_samples)\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances' and `pred_panoptic_seg`. 
And the\n            ``pred_instances`` usually contains the following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instances, ).\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arranged as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n\n            And the ``pred_panoptic_seg`` contains the following key\n\n                - sem_seg (Tensor): panoptic segmentation mask, has a\n                    shape (1, h, w).\n        \"\"\"\n        feats = self.extract_feat(batch_inputs)\n        mask_cls_results, mask_pred_results = self.panoptic_head.predict(\n            feats, batch_data_samples)\n        results_list = self.panoptic_fusion_head.predict(\n            mask_cls_results,\n            mask_pred_results,\n            batch_data_samples,\n            rescale=rescale)\n        results = self.add_pred_to_datasample(batch_data_samples, results_list)\n\n        return results\n\n    def add_pred_to_datasample(self, data_samples: SampleList,\n                               results_list: List[dict]) -> SampleList:\n        \"\"\"Add predictions to `DetDataSample`.\n\n        Args:\n            data_samples (list[:obj:`DetDataSample`], optional): A batch of\n                data samples that contain annotations and predictions.\n            results_list (List[dict]): Instance segmentation, semantic\n                segmentation and panoptic segmentation results.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contains\n            'pred_instances' and `pred_panoptic_seg`. And the\n            ``pred_instances`` usually contains the following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instances, ).\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arranged as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n\n            And the ``pred_panoptic_seg`` contains the following key\n\n                - sem_seg (Tensor): panoptic segmentation mask, has a\n                    shape (1, h, w).\n        \"\"\"\n        for data_sample, pred_results in zip(data_samples, results_list):\n            if 'pan_results' in pred_results:\n                data_sample.pred_panoptic_seg = pred_results['pan_results']\n\n            if 'ins_results' in pred_results:\n                data_sample.pred_instances = pred_results['ins_results']\n\n            assert 'sem_results' not in pred_results, 'semantic ' \\\n                'segmentation results are not supported yet.'\n\n        return data_samples\n\n    def _forward(self, batch_inputs: Tensor,\n                 batch_data_samples: SampleList) -> Tuple[List[Tensor]]:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            tuple[List[Tensor]]: A tuple of features from ``panoptic_head``\n            forward.\n        \"\"\"\n        feats = self.extract_feat(batch_inputs)\n        results = self.panoptic_head.forward(feats, batch_data_samples)\n        return results\n"
  },
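The `add_pred_to_datasample` contract above is easiest to see with concrete objects. The following is an illustrative sketch, not part of the repository, of how one per-image result dict from the fusion head is attached to a `DetDataSample`; the shapes, the 5 instances, and the 80-class label range are placeholders.

import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.structures import DetDataSample

# Fake per-image output in the format the panoptic fusion head produces:
# 'pan_results' is a PixelData whose `sem_seg` has shape (1, H, W),
# 'ins_results' is an InstanceData with scores/labels/bboxes/masks.
pred_results = {
    'pan_results': PixelData(sem_seg=torch.zeros(1, 480, 640, dtype=torch.long)),
    'ins_results': InstanceData(
        scores=torch.rand(5),
        labels=torch.randint(0, 80, (5, )),
        bboxes=torch.rand(5, 4),
        masks=torch.zeros(5, 480, 640, dtype=torch.bool)),
}

data_sample = DetDataSample()
if 'pan_results' in pred_results:
    data_sample.pred_panoptic_seg = pred_results['pan_results']
if 'ins_results' in pred_results:
    data_sample.pred_instances = pred_results['ins_results']

print(data_sample.pred_instances.scores.shape)      # torch.Size([5])
print(data_sample.pred_panoptic_seg.sem_seg.shape)  # torch.Size([1, 480, 640])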
  {
    "path": "mmdet/models/detectors/nasfcos.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass NASFCOS(SingleStageDetector):\n    \"\"\"Implementation of `NAS-FCOS: Fast Neural Architecture Search for Object\n    Detection. <https://arxiv.org/abs/1906.0442>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of NASFCOS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of NASFCOS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/paa.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass PAA(SingleStageDetector):\n    \"\"\"Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of PAA. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of PAA. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/panoptic_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor\n\n\n@MODELS.register_module()\nclass PanopticFPN(TwoStagePanopticSegmentor):\n    r\"\"\"Implementation of `Panoptic feature pyramid\n    networks <https://arxiv.org/pdf/1901.02446>`_\"\"\"\n\n    def __init__(\n            self,\n            backbone: ConfigType,\n            neck: OptConfigType = None,\n            rpn_head: OptConfigType = None,\n            roi_head: OptConfigType = None,\n            train_cfg: OptConfigType = None,\n            test_cfg: OptConfigType = None,\n            data_preprocessor: OptConfigType = None,\n            init_cfg: OptMultiConfig = None,\n            # for panoptic segmentation\n            semantic_head: OptConfigType = None,\n            panoptic_fusion_head: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg,\n            semantic_head=semantic_head,\n            panoptic_fusion_head=panoptic_fusion_head)\n"
  },
  {
    "path": "mmdet/models/detectors/panoptic_two_stage_segmentor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List\n\nimport torch\nfrom mmengine.structures import PixelData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass TwoStagePanopticSegmentor(TwoStageDetector):\n    \"\"\"Base class of Two-stage Panoptic Segmentor.\n\n    As well as the components in TwoStageDetector, Panoptic Segmentor has extra\n    semantic_head and panoptic_fusion_head.\n    \"\"\"\n\n    def __init__(\n            self,\n            backbone: ConfigType,\n            neck: OptConfigType = None,\n            rpn_head: OptConfigType = None,\n            roi_head: OptConfigType = None,\n            train_cfg: OptConfigType = None,\n            test_cfg: OptConfigType = None,\n            data_preprocessor: OptConfigType = None,\n            init_cfg: OptMultiConfig = None,\n            # for panoptic segmentation\n            semantic_head: OptConfigType = None,\n            panoptic_fusion_head: OptConfigType = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n\n        if semantic_head is not None:\n            self.semantic_head = MODELS.build(semantic_head)\n\n        if panoptic_fusion_head is not None:\n            panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None\n            panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()\n            panoptic_fusion_head_.update(test_cfg=panoptic_cfg)\n            self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)\n\n            self.num_things_classes = self.panoptic_fusion_head.\\\n                num_things_classes\n            self.num_stuff_classes = self.panoptic_fusion_head.\\\n                num_stuff_classes\n            self.num_classes = self.panoptic_fusion_head.num_classes\n\n    @property\n    def with_semantic_head(self) -> bool:\n        \"\"\"bool: whether the detector has semantic head\"\"\"\n        return hasattr(self,\n                       'semantic_head') and self.semantic_head is not None\n\n    @property\n    def with_panoptic_fusion_head(self) -> bool:\n        \"\"\"bool: whether the detector has panoptic fusion head\"\"\"\n        return hasattr(self, 'panoptic_fusion_head') and \\\n            self.panoptic_fusion_head is not None\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n\n        losses = dict()\n\n        # RPN forward and loss\n        if self.with_rpn:\n            proposal_cfg = self.train_cfg.get('rpn_proposal',\n                                              self.test_cfg.rpn)\n            rpn_data_samples = copy.deepcopy(batch_data_samples)\n            # set cat_id of gt_labels to 0 in RPN\n            for data_sample in rpn_data_samples:\n                data_sample.gt_instances.labels = \\\n                    torch.zeros_like(data_sample.gt_instances.labels)\n\n            rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(\n                x, rpn_data_samples, proposal_cfg=proposal_cfg)\n            # avoid get same name with roi_head loss\n            keys = rpn_losses.keys()\n            for key in list(keys):\n                if 'loss' in key and 'rpn' not in key:\n                    rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)\n            losses.update(rpn_losses)\n        else:\n            # TODO: Not support currently, should have a check at Fast R-CNN\n            assert batch_data_samples[0].get('proposals', None) is not None\n            # use pre-defined proposals in InstanceData for the second stage\n            # to extract ROI features.\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        roi_losses = self.roi_head.loss(x, rpn_results_list,\n                                        batch_data_samples)\n        losses.update(roi_losses)\n\n        semantic_loss = self.semantic_head.loss(x, batch_data_samples)\n        losses.update(semantic_loss)\n\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            List[:obj:`DetDataSample`]: Return the packed panoptic segmentation\n                results of input images. Each DetDataSample usually contains\n                'pred_panoptic_seg'. 
And the 'pred_panoptic_seg' has a key\n                ``sem_seg``, which is a tensor of shape (1, h, w).\n        \"\"\"\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        x = self.extract_feat(batch_inputs)\n\n        # If there are no pre-defined proposals, use RPN to get proposals\n        if batch_data_samples[0].get('proposals', None) is None:\n            rpn_results_list = self.rpn_head.predict(\n                x, batch_data_samples, rescale=False)\n        else:\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        results_list = self.roi_head.predict(\n            x, rpn_results_list, batch_data_samples, rescale=rescale)\n\n        seg_preds = self.semantic_head.predict(x, batch_img_metas, rescale)\n\n        results_list = self.panoptic_fusion_head.predict(\n            results_list, seg_preds)\n\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n\n    # TODO the code has not been verified and needs to be refactored later.\n    def _forward(self, batch_inputs: Tensor,\n                 batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n\n        Returns:\n            tuple: A tuple of features from ``rpn_head``, ``roi_head`` and\n                ``semantic_head`` forward.\n        \"\"\"\n        results = ()\n        x = self.extract_feat(batch_inputs)\n        rpn_outs = self.rpn_head.forward(x)\n        results = results + (rpn_outs)\n\n        # If there are no pre-defined proposals, use RPN to get proposals\n        if batch_data_samples[0].get('proposals', None) is None:\n            batch_img_metas = [\n                data_samples.metainfo for data_samples in batch_data_samples\n            ]\n            rpn_results_list = self.rpn_head.predict_by_feat(\n                *rpn_outs, batch_img_metas=batch_img_metas, rescale=False)\n        else:\n            # TODO: Not checked currently.\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        # roi_head\n        roi_outs = self.roi_head(x, rpn_results_list)\n        results = results + (roi_outs)\n\n        # semantic_head\n        sem_outs = self.semantic_head.forward(x)\n        results = results + (sem_outs['seg_preds'], )\n\n        return results\n\n    def add_pred_to_datasample(self, data_samples: SampleList,\n                               results_list: List[PixelData]) -> SampleList:\n        \"\"\"Add predictions to `DetDataSample`.\n\n        Args:\n            data_samples (list[:obj:`DetDataSample`]): The\n                annotation data of every samples.\n            results_list (List[PixelData]): Panoptic segmentation results of\n                each image.\n\n        Returns:\n            List[:obj:`DetDataSample`]: Return the packed panoptic segmentation\n                results of input images. Each DetDataSample usually contains\n                'pred_panoptic_seg'. 
And the 'pred_panoptic_seg' has a key\n                ``sem_seg``, which is a tensor of shape (1, h, w).\n        \"\"\"\n\n        for data_sample, pred_panoptic_seg in zip(data_samples, results_list):\n            data_sample.pred_panoptic_seg = pred_panoptic_seg\n        return data_samples\n"
  },
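To make the constructor wiring above concrete, here is a heavily abbreviated, hypothetical config sketch (field values are placeholders, not a config shipped under `configs/`): the segmentor adds `semantic_head` and `panoptic_fusion_head` on top of the usual two-stage fields, and `test_cfg.panoptic` is copied into the fusion head's `test_cfg` during `__init__`.

# Hypothetical, abbreviated model config; the real Panoptic FPN configs spell
# out every sub-module in full.
model = dict(
    type='TwoStagePanopticSegmentor',
    backbone=dict(type='ResNet', depth=50),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
              out_channels=256, num_outs=5),
    rpn_head=dict(type='RPNHead', in_channels=256),
    roi_head=dict(type='StandardRoIHead'),
    # extra heads that distinguish a panoptic segmentor from a plain detector
    semantic_head=dict(type='PanopticFPNHead', num_things_classes=80,
                       num_stuff_classes=53),
    panoptic_fusion_head=dict(type='HeuristicFusionHead',
                              num_things_classes=80, num_stuff_classes=53),
    # forwarded into panoptic_fusion_head as its test_cfg by __init__ above
    test_cfg=dict(panoptic=dict(score_thr=0.6, mask_overlap=0.5)))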
  {
    "path": "mmdet/models/detectors/point_rend.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass PointRend(TwoStageDetector):\n    \"\"\"PointRend: Image Segmentation as Rendering\n\n    This detector is the implementation of\n    `PointRend <https://arxiv.org/abs/1912.08193>`_.\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigDict,\n                 rpn_head: ConfigDict,\n                 roi_head: ConfigDict,\n                 train_cfg: ConfigDict,\n                 test_cfg: ConfigDict,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            data_preprocessor=data_preprocessor)\n"
  },
  {
    "path": "mmdet/models/detectors/queryinst.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .sparse_rcnn import SparseRCNN\n\n\n@MODELS.register_module()\nclass QueryInst(SparseRCNN):\n    r\"\"\"Implementation of\n    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/reppoints_detector.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass RepPointsDetector(SingleStageDetector):\n    \"\"\"RepPoints: Point Set Representation for Object Detection.\n\n        This detector is the implementation of:\n        - RepPoints detector (https://arxiv.org/pdf/1904.11490)\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/retinanet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass RetinaNet(SingleStageDetector):\n    \"\"\"Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
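Since RetinaNet, like the other thin wrappers in this directory, only forwards its arguments to `SingleStageDetector`, the practical entry point is building it from a registered config. A hedged usage sketch follows; the config path is the one conventionally shipped with the repository, and checkpoint loading and the data pipeline are omitted.

from mmengine.config import Config
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules

register_all_modules()  # register mmdet modules and set the default scope

cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
model = MODELS.build(cfg.model)
model.eval()

# batch_inputs: a (N, C, H, W) tensor; batch_data_samples: list[DetDataSample]
# whose metainfo is filled in by the data pipeline.
# results = model.predict(batch_inputs, batch_data_samples)
# results[0].pred_instances then carries scores, labels and bboxes.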
  {
    "path": "mmdet/models/detectors/rpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass RPN(SingleStageDetector):\n    \"\"\"Implementation of Region Proposal Network.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 rpn_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        super(SingleStageDetector, self).__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.backbone = MODELS.build(backbone)\n        self.neck = MODELS.build(neck) if neck is not None else None\n        rpn_train_cfg = train_cfg['rpn'] if train_cfg is not None else None\n        rpn_head_num_classes = rpn_head.get('num_classes', 1)\n        if rpn_head_num_classes != 1:\n            warnings.warn('The `num_classes` should be 1 in RPN, but get '\n                          f'{rpn_head_num_classes}, please set '\n                          'rpn_head.num_classes = 1 in your config file.')\n            rpn_head.update(num_classes=1)\n        rpn_head.update(train_cfg=rpn_train_cfg)\n        rpn_head.update(test_cfg=test_cfg['rpn'])\n        self.bbox_head = MODELS.build(rpn_head)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n\n        # set cat_id of gt_labels to 0 in RPN\n        rpn_data_samples = copy.deepcopy(batch_data_samples)\n        for data_sample in rpn_data_samples:\n            data_sample.gt_instances.labels = \\\n                torch.zeros_like(data_sample.gt_instances.labels)\n\n        losses = self.bbox_head.loss(x, rpn_data_samples)\n        return losses\n"
  },
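The only non-boilerplate step in `RPN.loss` above is the relabelling: every ground-truth instance is mapped to class id 0 so the head learns objectness only. A tiny standalone illustration with toy values:

import torch
from mmengine.structures import InstanceData

gt_instances = InstanceData(
    bboxes=torch.tensor([[10., 10., 50., 60.], [30., 40., 120., 200.]]),
    labels=torch.tensor([17, 3]))

# same operation as in RPN.loss: keep the boxes, drop the category information
gt_instances.labels = torch.zeros_like(gt_instances.labels)
print(gt_instances.labels)  # tensor([0, 0]) -- object vs. background only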
  {
    "path": "mmdet/models/detectors/rtmdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmengine.dist import get_world_size\nfrom mmengine.logging import print_log\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass RTMDet(SingleStageDetector):\n    \"\"\"Implementation of RTMDet.\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of ATSS. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of ATSS. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n        use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 use_syncbn: bool = True) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n\n        # TODO： Waiting for mmengine support\n        if use_syncbn and get_world_size() > 1:\n            torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)\n            print_log('Using SyncBatchNorm()', 'current')\n"
  },
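The `use_syncbn` branch above relies on PyTorch's stock converter. A standalone sketch of what it does to a module tree; conversion itself does not need a process group, but running the converted model in training mode does.

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
# replaces every BatchNorm*d child with a SyncBatchNorm of the same config
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(type(model[1]).__name__)  # SyncBatchNorm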
  {
    "path": "mmdet/models/detectors/scnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom .cascade_rcnn import CascadeRCNN\n\n\n@MODELS.register_module()\nclass SCNet(CascadeRCNN):\n    \"\"\"Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_\"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n"
  },
  {
    "path": "mmdet/models/detectors/semi_base.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.models.utils import (filter_gt_instances, rename_loss_dict,\n                                reweight_loss_dict)\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_project\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .base import BaseDetector\n\n\n@MODELS.register_module()\nclass SemiBaseDetector(BaseDetector):\n    \"\"\"Base class for semi-supervised detectors.\n\n    Semi-supervised detectors typically consisting of a teacher model\n    updated by exponential moving average and a student model updated\n    by gradient descent.\n\n    Args:\n        detector (:obj:`ConfigDict` or dict): The detector config.\n        semi_train_cfg (:obj:`ConfigDict` or dict, optional):\n            The semi-supervised training config.\n        semi_test_cfg (:obj:`ConfigDict` or dict, optional):\n            The semi-supervised testing config.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 detector: ConfigType,\n                 semi_train_cfg: OptConfigType = None,\n                 semi_test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.student = MODELS.build(detector)\n        self.teacher = MODELS.build(detector)\n        self.semi_train_cfg = semi_train_cfg\n        self.semi_test_cfg = semi_test_cfg\n        if self.semi_train_cfg.get('freeze_teacher', True) is True:\n            self.freeze(self.teacher)\n\n    @staticmethod\n    def freeze(model: nn.Module):\n        \"\"\"Freeze the model.\"\"\"\n        model.eval()\n        for param in model.parameters():\n            param.requires_grad = False\n\n    def loss(self, multi_batch_inputs: Dict[str, Tensor],\n             multi_batch_data_samples: Dict[str, SampleList]) -> dict:\n        \"\"\"Calculate losses from multi-branch inputs and data samples.\n\n        Args:\n            multi_batch_inputs (Dict[str, Tensor]): The dict of multi-branch\n                input images, each value with shape (N, C, H, W).\n                Each value should usually be mean centered and std scaled.\n            multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):\n                The dict of multi-branch data samples.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n        losses = dict()\n        losses.update(**self.loss_by_gt_instances(\n            multi_batch_inputs['sup'], multi_batch_data_samples['sup']))\n\n        origin_pseudo_data_samples, batch_info = self.get_pseudo_instances(\n            multi_batch_inputs['unsup_teacher'],\n            multi_batch_data_samples['unsup_teacher'])\n        multi_batch_data_samples[\n            'unsup_student'] = self.project_pseudo_instances(\n                origin_pseudo_data_samples,\n                
multi_batch_data_samples['unsup_student'])\n        losses.update(**self.loss_by_pseudo_instances(\n            multi_batch_inputs['unsup_student'],\n            multi_batch_data_samples['unsup_student'], batch_info))\n        return losses\n\n    def loss_by_gt_instances(self, batch_inputs: Tensor,\n                             batch_data_samples: SampleList) -> dict:\n        \"\"\"Calculate losses from a batch of inputs and ground-truth data\n        samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n\n        losses = self.student.loss(batch_inputs, batch_data_samples)\n        sup_weight = self.semi_train_cfg.get('sup_weight', 1.)\n        return rename_loss_dict('sup_', reweight_loss_dict(losses, sup_weight))\n\n    def loss_by_pseudo_instances(self,\n                                 batch_inputs: Tensor,\n                                 batch_data_samples: SampleList,\n                                 batch_info: Optional[dict] = None) -> dict:\n        \"\"\"Calculate losses from a batch of inputs and pseudo data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n            batch_info (dict): Batch information of teacher model\n                forward propagation process. Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n        batch_data_samples = filter_gt_instances(\n            batch_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)\n        losses = self.student.loss(batch_inputs, batch_data_samples)\n        pseudo_instances_num = sum([\n            len(data_samples.gt_instances)\n            for data_samples in batch_data_samples\n        ])\n        unsup_weight = self.semi_train_cfg.get(\n            'unsup_weight', 1.) 
if pseudo_instances_num > 0 else 0.\n        return rename_loss_dict('unsup_',\n                                reweight_loss_dict(losses, unsup_weight))\n\n    @torch.no_grad()\n    def get_pseudo_instances(\n            self, batch_inputs: Tensor, batch_data_samples: SampleList\n    ) -> Tuple[SampleList, Optional[dict]]:\n        \"\"\"Get pseudo instances from teacher model.\"\"\"\n        self.teacher.eval()\n        results_list = self.teacher.predict(\n            batch_inputs, batch_data_samples, rescale=False)\n        batch_info = {}\n        for data_samples, results in zip(batch_data_samples, results_list):\n            data_samples.gt_instances = results.pred_instances\n            data_samples.gt_instances.bboxes = bbox_project(\n                data_samples.gt_instances.bboxes,\n                torch.from_numpy(data_samples.homography_matrix).inverse().to(\n                    self.data_preprocessor.device), data_samples.ori_shape)\n        return batch_data_samples, batch_info\n\n    def project_pseudo_instances(self, batch_pseudo_instances: SampleList,\n                                 batch_data_samples: SampleList) -> SampleList:\n        \"\"\"Project pseudo instances.\"\"\"\n        for pseudo_instances, data_samples in zip(batch_pseudo_instances,\n                                                  batch_data_samples):\n            data_samples.gt_instances = copy.deepcopy(\n                pseudo_instances.gt_instances)\n            data_samples.gt_instances.bboxes = bbox_project(\n                data_samples.gt_instances.bboxes,\n                torch.tensor(data_samples.homography_matrix).to(\n                    self.data_preprocessor.device), data_samples.img_shape)\n        wh_thr = self.semi_train_cfg.get('min_pseudo_bbox_wh', (1e-2, 1e-2))\n        return filter_gt_instances(batch_data_samples, wh_thr=wh_thr)\n\n    def predict(self, batch_inputs: Tensor,\n                batch_data_samples: SampleList) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Return the detection results of the\n            input images. The returns value is DetDataSample,\n            which usually contain 'pred_instances'. 
And the\n            ``pred_instances`` usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        if self.semi_test_cfg.get('predict_on', 'teacher') == 'teacher':\n            return self.teacher(\n                batch_inputs, batch_data_samples, mode='predict')\n        else:\n            return self.student(\n                batch_inputs, batch_data_samples, mode='predict')\n\n    def _forward(self, batch_inputs: Tensor,\n                 batch_data_samples: SampleList) -> SampleList:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n\n        Returns:\n            tuple: A tuple of features from ``rpn_head`` and ``roi_head``\n            forward.\n        \"\"\"\n        if self.semi_test_cfg.get('forward_on', 'teacher') == 'teacher':\n            return self.teacher(\n                batch_inputs, batch_data_samples, mode='tensor')\n        else:\n            return self.student(\n                batch_inputs, batch_data_samples, mode='tensor')\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n        Returns:\n            tuple[Tensor]: Multi-level features that may have\n            different resolutions.\n        \"\"\"\n        if self.semi_test_cfg.get('extract_feat_on', 'teacher') == 'teacher':\n            return self.teacher.extract_feat(batch_inputs)\n        else:\n            return self.student.extract_feat(batch_inputs)\n\n    def _load_from_state_dict(self, state_dict: dict, prefix: str,\n                              local_metadata: dict, strict: bool,\n                              missing_keys: Union[List[str], str],\n                              unexpected_keys: Union[List[str], str],\n                              error_msgs: Union[List[str], str]) -> None:\n        \"\"\"Add teacher and student prefixes to model parameter names.\"\"\"\n        if not any([\n                'student' in key or 'teacher' in key\n                for key in state_dict.keys()\n        ]):\n            keys = list(state_dict.keys())\n            state_dict.update({'teacher.' + k: state_dict[k] for k in keys})\n            state_dict.update({'student.' + k: state_dict[k] for k in keys})\n            for k in keys:\n                state_dict.pop(k)\n        return super()._load_from_state_dict(\n            state_dict,\n            prefix,\n            local_metadata,\n            strict,\n            missing_keys,\n            unexpected_keys,\n            error_msgs,\n        )\n"
  },
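The sup_/unsup_ bookkeeping in `loss_by_gt_instances` and `loss_by_pseudo_instances` can be pictured with plain dicts. A rough sketch, assuming `rename_loss_dict` prefixes every key and `reweight_loss_dict` scales every loss value; the actual helpers live in `mmdet.models.utils`.

import torch

losses = {'loss_cls': torch.tensor(0.8), 'loss_bbox': torch.tensor(0.4)}
unsup_weight = 4.0  # placeholder; read from semi_train_cfg in the real code

# reweight_loss_dict(losses, w): scale every loss entry
reweighted = {k: v * unsup_weight for k, v in losses.items()}
# rename_loss_dict('unsup_', ...): prefix every key so supervised and
# unsupervised terms never collide when the two dicts are merged
prefixed = {f'unsup_{k}': v for k, v in reweighted.items()}
print(prefixed)  # {'unsup_loss_cls': tensor(3.2000), 'unsup_loss_bbox': tensor(1.6000)}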
  {
    "path": "mmdet/models/detectors/single_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList, SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .base import BaseDetector\n\n\n@MODELS.register_module()\nclass SingleStageDetector(BaseDetector):\n    \"\"\"Base class for single-stage detectors.\n\n    Single-stage detectors directly and densely predict bounding boxes on the\n    output features of the backbone+neck.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.backbone = MODELS.build(backbone)\n        if neck is not None:\n            self.neck = MODELS.build(neck)\n        bbox_head.update(train_cfg=train_cfg)\n        bbox_head.update(test_cfg=test_cfg)\n        self.bbox_head = MODELS.build(bbox_head)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def _load_from_state_dict(self, state_dict: dict, prefix: str,\n                              local_metadata: dict, strict: bool,\n                              missing_keys: Union[List[str], str],\n                              unexpected_keys: Union[List[str], str],\n                              error_msgs: Union[List[str], str]) -> None:\n        \"\"\"Exchange bbox_head key to rpn_head key when loading two-stage\n        weights into single-stage model.\"\"\"\n        bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'\n        bbox_head_keys = [\n            k for k in state_dict.keys() if k.startswith(bbox_head_prefix)\n        ]\n        rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'\n        rpn_head_keys = [\n            k for k in state_dict.keys() if k.startswith(rpn_head_prefix)\n        ]\n        if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0:\n            for rpn_head_key in rpn_head_keys:\n                bbox_head_key = bbox_head_prefix + \\\n                                rpn_head_key[len(rpn_head_prefix):]\n                state_dict[bbox_head_key] = state_dict.pop(rpn_head_key)\n        super()._load_from_state_dict(state_dict, prefix, local_metadata,\n                                      strict, missing_keys, unexpected_keys,\n                                      error_msgs)\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        losses = self.bbox_head.loss(x, batch_data_samples)\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances'. And the ``pred_instances`` usually\n            contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        results_list = self.bbox_head.predict(\n            x, batch_data_samples, rescale=rescale)\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n\n    def _forward(\n            self,\n            batch_inputs: Tensor,\n            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n         Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple[list]: A tuple of features from ``bbox_head`` forward.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        results = self.bbox_head.forward(x)\n        return results\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n        Returns:\n            tuple[Tensor]: Multi-level features that may have\n            different resolutions.\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n"
  },
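The checkpoint surgery in `_load_from_state_dict` above is simple enough to show on a plain dict: if a loaded two-stage checkpoint has `rpn_head.*` keys while the single-stage model has no `bbox_head.*` keys, the RPN weights are renamed. Toy illustration with placeholder values:

state_dict = {'backbone.conv1.weight': 0, 'rpn_head.rpn_conv.weight': 1}
bbox_head_prefix, rpn_head_prefix = 'bbox_head', 'rpn_head'

bbox_head_keys = [k for k in state_dict if k.startswith(bbox_head_prefix)]
rpn_head_keys = [k for k in state_dict if k.startswith(rpn_head_prefix)]
if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0:
    for rpn_head_key in rpn_head_keys:
        bbox_head_key = bbox_head_prefix + rpn_head_key[len(rpn_head_prefix):]
        state_dict[bbox_head_key] = state_dict.pop(rpn_head_key)

print(sorted(state_dict))
# ['backbone.conv1.weight', 'bbox_head.rpn_conv.weight']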
  {
    "path": "mmdet/models/detectors/single_stage_instance_seg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import Tuple\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList, SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .base import BaseDetector\n\nINF = 1e8\n\n\n@MODELS.register_module()\nclass SingleStageInstanceSegmentor(BaseDetector):\n    \"\"\"Base class for single-stage instance segmentors.\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 mask_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.backbone = MODELS.build(backbone)\n        if neck is not None:\n            self.neck = MODELS.build(neck)\n        else:\n            self.neck = None\n        if bbox_head is not None:\n            bbox_head.update(train_cfg=copy.deepcopy(train_cfg))\n            bbox_head.update(test_cfg=copy.deepcopy(test_cfg))\n            self.bbox_head = MODELS.build(bbox_head)\n        else:\n            self.bbox_head = None\n\n        assert mask_head, f'`mask_head` must ' \\\n                          f'be implemented in {self.__class__.__name__}'\n        mask_head.update(train_cfg=copy.deepcopy(train_cfg))\n        mask_head.update(test_cfg=copy.deepcopy(test_cfg))\n        self.mask_head = MODELS.build(mask_head)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n        Returns:\n            tuple[Tensor]: Multi-level features that may have different\n            resolutions.\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def _forward(self,\n                 batch_inputs: Tensor,\n                 batch_data_samples: OptSampleList = None,\n                 **kwargs) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n         Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n\n        Returns:\n            tuple: A tuple of features from ``bbox_head`` forward.\n        \"\"\"\n        outs = ()\n        # backbone\n        x = self.extract_feat(batch_inputs)\n        # bbox_head\n        positive_infos = None\n        if self.with_bbox:\n            assert batch_data_samples is not None\n            bbox_outs = self.bbox_head.forward(x)\n            outs = outs + (bbox_outs, )\n            # It is necessary to use `bbox_head.loss` to update\n            # `_raw_positive_infos` which will be used in `get_positive_infos`\n            # positive_infos will be used in the following mask head.\n            _ = self.bbox_head.loss(x, batch_data_samples, **kwargs)\n            positive_infos = self.bbox_head.get_positive_infos()\n        # mask_head\n        if positive_infos is None:\n            mask_outs = self.mask_head.forward(x)\n        else:\n            mask_outs = self.mask_head.forward(x, positive_infos)\n        outs = outs + (mask_outs, )\n        return outs\n\n    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList,\n             **kwargs) -> dict:\n        \"\"\"\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        losses = dict()\n\n        positive_infos = None\n        # CondInst and YOLACT have bbox_head\n        if self.with_bbox:\n            bbox_losses = self.bbox_head.loss(x, batch_data_samples, **kwargs)\n            losses.update(bbox_losses)\n            # get positive information from bbox head, which will be used\n            # in the following mask head.\n            positive_infos = self.bbox_head.get_positive_infos()\n\n        mask_loss = self.mask_head.loss(\n            x, batch_data_samples, positive_infos=positive_infos, **kwargs)\n        # avoid loss override\n        assert not set(mask_loss.keys()) & set(losses.keys())\n\n        losses.update(mask_loss)\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True,\n                **kwargs) -> SampleList:\n        \"\"\"Perform forward propagation of the mask head and predict mask\n        results on the features of the upstream network.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances'. 
And the ``pred_instances`` usually\n            contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n                (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n                (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n                the last dimension 4 arrange as (x1, y1, x2, y2).\n            - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n        if self.with_bbox:\n            # the bbox branch does not need to be scaled to the original\n            # image scale, because the mask branch will scale both bbox\n            # and mask at the same time.\n            bbox_rescale = rescale if not self.with_mask else False\n            results_list = self.bbox_head.predict(\n                x, batch_data_samples, rescale=bbox_rescale)\n        else:\n            results_list = None\n\n        results_list = self.mask_head.predict(\n            x, batch_data_samples, rescale=rescale, results_list=results_list)\n\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n"
  },
  {
    "path": "mmdet/models/detectors/soft_teacher.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.utils import (filter_gt_instances, rename_loss_dict,\n                                reweight_loss_dict)\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi, bbox_project\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig\nfrom ..utils.misc import unpack_gt_instances\nfrom .semi_base import SemiBaseDetector\n\n\n@MODELS.register_module()\nclass SoftTeacher(SemiBaseDetector):\n    r\"\"\"Implementation of `End-to-End Semi-Supervised Object Detection\n    with Soft Teacher <https://arxiv.org/abs/2106.09018>`_\n\n    Args:\n        detector (:obj:`ConfigDict` or dict): The detector config.\n        semi_train_cfg (:obj:`ConfigDict` or dict, optional):\n            The semi-supervised training config.\n        semi_test_cfg (:obj:`ConfigDict` or dict, optional):\n            The semi-supervised testing config.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 detector: ConfigType,\n                 semi_train_cfg: OptConfigType = None,\n                 semi_test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            detector=detector,\n            semi_train_cfg=semi_train_cfg,\n            semi_test_cfg=semi_test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n\n    def loss_by_pseudo_instances(self,\n                                 batch_inputs: Tensor,\n                                 batch_data_samples: SampleList,\n                                 batch_info: Optional[dict] = None) -> dict:\n        \"\"\"Calculate losses from a batch of inputs and pseudo data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n            batch_info (dict): Batch information of teacher model\n                forward propagation process. 
Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n\n        x = self.student.extract_feat(batch_inputs)\n\n        losses = {}\n        rpn_losses, rpn_results_list = self.rpn_loss_by_pseudo_instances(\n            x, batch_data_samples)\n        losses.update(**rpn_losses)\n        losses.update(**self.rcnn_cls_loss_by_pseudo_instances(\n            x, rpn_results_list, batch_data_samples, batch_info))\n        losses.update(**self.rcnn_reg_loss_by_pseudo_instances(\n            x, rpn_results_list, batch_data_samples))\n        unsup_weight = self.semi_train_cfg.get('unsup_weight', 1.)\n        return rename_loss_dict('unsup_',\n                                reweight_loss_dict(losses, unsup_weight))\n\n    @torch.no_grad()\n    def get_pseudo_instances(\n            self, batch_inputs: Tensor, batch_data_samples: SampleList\n    ) -> Tuple[SampleList, Optional[dict]]:\n        \"\"\"Get pseudo instances from teacher model.\"\"\"\n        assert self.teacher.with_bbox, 'Bbox head must be implemented.'\n        x = self.teacher.extract_feat(batch_inputs)\n\n        # If there are no pre-defined proposals, use RPN to get proposals\n        if batch_data_samples[0].get('proposals', None) is None:\n            rpn_results_list = self.teacher.rpn_head.predict(\n                x, batch_data_samples, rescale=False)\n        else:\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        results_list = self.teacher.roi_head.predict(\n            x, rpn_results_list, batch_data_samples, rescale=False)\n\n        for data_samples, results in zip(batch_data_samples, results_list):\n            data_samples.gt_instances = results\n\n        batch_data_samples = filter_gt_instances(\n            batch_data_samples,\n            score_thr=self.semi_train_cfg.pseudo_label_initial_score_thr)\n\n        reg_uncs_list = self.compute_uncertainty_with_aug(\n            x, batch_data_samples)\n\n        for data_samples, reg_uncs in zip(batch_data_samples, reg_uncs_list):\n            data_samples.gt_instances['reg_uncs'] = reg_uncs\n            data_samples.gt_instances.bboxes = bbox_project(\n                data_samples.gt_instances.bboxes,\n                torch.from_numpy(data_samples.homography_matrix).inverse().to(\n                    self.data_preprocessor.device), data_samples.ori_shape)\n\n        batch_info = {\n            'feat': x,\n            'img_shape': [],\n            'homography_matrix': [],\n            'metainfo': []\n        }\n        for data_samples in batch_data_samples:\n            batch_info['img_shape'].append(data_samples.img_shape)\n            batch_info['homography_matrix'].append(\n                torch.from_numpy(data_samples.homography_matrix).to(\n                    self.data_preprocessor.device))\n            batch_info['metainfo'].append(data_samples.metainfo)\n        return batch_data_samples, batch_info\n\n    def rpn_loss_by_pseudo_instances(self, x: Tuple[Tensor],\n                                     batch_data_samples: SampleList) -> dict:\n        \"\"\"Calculate rpn loss from a batch of inputs and pseudo data samples.\n\n        Args:\n            x (tuple[Tensor]): Features from FPN.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n        Returns:\n            dict: A dictionary of rpn loss components\n        \"\"\"\n\n        rpn_data_samples = copy.deepcopy(batch_data_samples)\n        rpn_data_samples = filter_gt_instances(\n            rpn_data_samples, score_thr=self.semi_train_cfg.rpn_pseudo_thr)\n        proposal_cfg = self.student.train_cfg.get('rpn_proposal',\n                                                  self.student.test_cfg.rpn)\n        # set cat_id of gt_labels to 0 in RPN\n        for data_sample in rpn_data_samples:\n            data_sample.gt_instances.labels = \\\n                torch.zeros_like(data_sample.gt_instances.labels)\n\n        rpn_losses, rpn_results_list = self.student.rpn_head.loss_and_predict(\n            x, rpn_data_samples, proposal_cfg=proposal_cfg)\n        for key in rpn_losses.keys():\n            if 'loss' in key and 'rpn' not in key:\n                rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)\n        return rpn_losses, rpn_results_list\n\n    def rcnn_cls_loss_by_pseudo_instances(self, x: Tuple[Tensor],\n                                          unsup_rpn_results_list: InstanceList,\n                                          batch_data_samples: SampleList,\n                                          batch_info: dict) -> dict:\n        \"\"\"Calculate classification loss from a batch of inputs and pseudo data\n        samples.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            unsup_rpn_results_list (list[:obj:`InstanceData`]):\n                List of region proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n            batch_info (dict): Batch information of teacher model\n                forward propagation process.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of rcnn\n                classification loss components\n        \"\"\"\n        rpn_results_list = copy.deepcopy(unsup_rpn_results_list)\n        cls_data_samples = copy.deepcopy(batch_data_samples)\n        cls_data_samples = filter_gt_instances(\n            cls_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)\n\n        outputs = unpack_gt_instances(cls_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, _ = outputs\n\n        # assign gts and sample proposals\n        num_imgs = len(cls_data_samples)\n        sampling_results = []\n        for i in range(num_imgs):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n            assign_result = self.student.roi_head.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.student.roi_head.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                feats=[lvl_feat[i][None] for lvl_feat in x])\n            sampling_results.append(sampling_result)\n\n        selected_bboxes = [res.priors for res in sampling_results]\n        rois = bbox2roi(selected_bboxes)\n        bbox_results = self.student.roi_head._bbox_forward(x, rois)\n        # cls_reg_targets is a tuple of labels, label_weights,\n        # and bbox_targets, bbox_weights\n        cls_reg_targets = self.student.roi_head.bbox_head.get_targets(\n            sampling_results, self.student.train_cfg.rcnn)\n\n        selected_results_list = []\n        for bboxes, data_samples, teacher_matrix, teacher_img_shape in zip(\n                selected_bboxes, batch_data_samples,\n                batch_info['homography_matrix'], batch_info['img_shape']):\n            student_matrix = torch.tensor(\n                data_samples.homography_matrix, device=teacher_matrix.device)\n            homography_matrix = teacher_matrix @ student_matrix.inverse()\n            projected_bboxes = bbox_project(bboxes, homography_matrix,\n                                            teacher_img_shape)\n            selected_results_list.append(InstanceData(bboxes=projected_bboxes))\n\n        with torch.no_grad():\n            results_list = self.teacher.roi_head.predict_bbox(\n                batch_info['feat'],\n                batch_info['metainfo'],\n                selected_results_list,\n                rcnn_test_cfg=None,\n                rescale=False)\n            bg_score = torch.cat(\n                [results.scores[:, -1] for results in results_list])\n            # cls_reg_targets[0] is labels\n            neg_inds = cls_reg_targets[\n                0] == self.student.roi_head.bbox_head.num_classes\n            # cls_reg_targets[1] is label_weights\n            cls_reg_targets[1][neg_inds] = bg_score[neg_inds].detach()\n\n        losses = self.student.roi_head.bbox_head.loss(\n            bbox_results['cls_score'], bbox_results['bbox_pred'], rois,\n            *cls_reg_targets)\n     
   # cls_reg_targets[1] is label_weights\n        losses['loss_cls'] = losses['loss_cls'] * len(\n            cls_reg_targets[1]) / max(sum(cls_reg_targets[1]), 1.0)\n        return losses\n\n    def rcnn_reg_loss_by_pseudo_instances(\n            self, x: Tuple[Tensor], unsup_rpn_results_list: InstanceList,\n            batch_data_samples: SampleList) -> dict:\n        \"\"\"Calculate rcnn regression loss from a batch of inputs and pseudo\n        data samples.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            unsup_rpn_results_list (list[:obj:`InstanceData`]):\n                List of region proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of rcnn\n                regression loss components\n        \"\"\"\n        rpn_results_list = copy.deepcopy(unsup_rpn_results_list)\n        reg_data_samples = copy.deepcopy(batch_data_samples)\n        for data_samples in reg_data_samples:\n            if data_samples.gt_instances.bboxes.shape[0] > 0:\n                data_samples.gt_instances = data_samples.gt_instances[\n                    data_samples.gt_instances.reg_uncs <\n                    self.semi_train_cfg.reg_pseudo_thr]\n        roi_losses = self.student.roi_head.loss(x, rpn_results_list,\n                                                reg_data_samples)\n        return {'loss_bbox': roi_losses['loss_bbox']}\n\n    def compute_uncertainty_with_aug(\n            self, x: Tuple[Tensor],\n            batch_data_samples: SampleList) -> List[Tensor]:\n        \"\"\"Compute uncertainty with augmented bboxes.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n                which are `pseudo_instance` or `pseudo_panoptic_seg`\n                or `pseudo_sem_seg` in fact.\n\n        Returns:\n            list[Tensor]: A list of uncertainty for pseudo bboxes.\n        \"\"\"\n        auged_results_list = self.aug_box(batch_data_samples,\n                                          self.semi_train_cfg.jitter_times,\n                                          self.semi_train_cfg.jitter_scale)\n        # flatten\n        auged_results_list = [\n            InstanceData(bboxes=auged.reshape(-1, auged.shape[-1]))\n            for auged in auged_results_list\n        ]\n\n        self.teacher.roi_head.test_cfg = None\n        results_list = self.teacher.roi_head.predict(\n            x, auged_results_list, batch_data_samples, rescale=False)\n        self.teacher.roi_head.test_cfg = self.teacher.test_cfg.rcnn\n\n        reg_channel = max(\n            [results.bboxes.shape[-1] for results in results_list]) // 4\n        bboxes = [\n            results.bboxes.reshape(self.semi_train_cfg.jitter_times, -1,\n                                   results.bboxes.shape[-1])\n            if results.bboxes.numel() > 0 else results.bboxes.new_zeros(\n                self.semi_train_cfg.jitter_times, 0, 4 * reg_channel).float()\n            for results in results_list\n        ]\n\n        box_unc = [bbox.std(dim=0) for bbox in bboxes]\n        bboxes = [bbox.mean(dim=0) for bbox in bboxes]\n        labels = [\n            data_samples.gt_instances.labels\n            for data_samples in batch_data_samples\n        ]\n        if reg_channel != 1:\n            bboxes = [\n                bbox.reshape(bbox.shape[0], reg_channel,\n                             4)[torch.arange(bbox.shape[0]), label]\n                for bbox, label in zip(bboxes, labels)\n            ]\n            box_unc = [\n                unc.reshape(unc.shape[0], reg_channel,\n                            4)[torch.arange(unc.shape[0]), label]\n                for unc, label in zip(box_unc, labels)\n            ]\n\n        box_shape = [(bbox[:, 2:4] - bbox[:, :2]).clamp(min=1.0)\n                     for bbox in bboxes]\n        box_unc = [\n            torch.mean(\n                unc / wh[:, None, :].expand(-1, 2, 2).reshape(-1, 4), dim=-1)\n            if wh.numel() > 0 else unc for unc, wh in zip(box_unc, box_shape)\n        ]\n        return box_unc\n\n    @staticmethod\n    def aug_box(batch_data_samples, times, frac):\n        \"\"\"Augment bboxes with jitter.\"\"\"\n\n        def _aug_single(box):\n            box_scale = box[:, 2:4] - box[:, :2]\n            box_scale = (\n                box_scale.clamp(min=1)[:, None, :].expand(-1, 2,\n                                                          2).reshape(-1, 4))\n            aug_scale = box_scale * frac  # [n,4]\n\n            offset = (\n                torch.randn(times, box.shape[0], 4, device=box.device) *\n                aug_scale[None, ...])\n            new_box = box.clone()[None, ...].expand(times, box.shape[0],\n                                                    -1) + offset\n            return new_box\n\n        return [\n            _aug_single(data_samples.gt_instances.bboxes)\n            for data_samples in batch_data_samples\n        ]\n"
  },
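The `loss_by_pseudo_instances` method of the SoftTeacher detector above prefixes every unsupervised loss key with `unsup_` and scales it by `semi_train_cfg.unsup_weight` through the imported `rename_loss_dict` / `reweight_loss_dict` helpers. A minimal, standalone sketch of that behavior (my own re-implementation for illustration, not mmdet's actual helpers) could look like this:

```python
import torch


def rename_loss_dict(prefix, losses):
    # prepend a prefix to every key, e.g. 'loss_cls' -> 'unsup_loss_cls'
    return {prefix + k: v for k, v in losses.items()}


def reweight_loss_dict(losses, weight):
    # scale only the entries that are actual losses (key contains 'loss')
    return {k: v * weight if 'loss' in k else v for k, v in losses.items()}


losses = {'loss_cls': torch.tensor(1.0), 'loss_bbox': torch.tensor(0.5)}
unsup_losses = rename_loss_dict('unsup_', reweight_loss_dict(losses, 2.0))
print(unsup_losses)  # {'unsup_loss_cls': tensor(2.), 'unsup_loss_bbox': tensor(1.)}
```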
  {
    "path": "mmdet/models/detectors/solo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@MODELS.register_module()\nclass SOLO(SingleStageInstanceSegmentor):\n    \"\"\"`SOLO: Segmenting Objects by Locations\n    <https://arxiv.org/abs/1912.04488>`_\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 mask_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/solov2.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@MODELS.register_module()\nclass SOLOv2(SingleStageInstanceSegmentor):\n    \"\"\"`SOLOv2: Dynamic and Fast Instance Segmentation\n    <https://arxiv.org/abs/2003.10152>`_\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 bbox_head: OptConfigType = None,\n                 mask_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/sparse_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .two_stage import TwoStageDetector\n\n\n@MODELS.register_module()\nclass SparseRCNN(TwoStageDetector):\n    r\"\"\"Implementation of `Sparse R-CNN: End-to-End Object Detection with\n    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 rpn_head: OptConfigType = None,\n                 roi_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n        assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \\\n            'do not support external proposals'\n"
  },
  {
    "path": "mmdet/models/detectors/tood.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass TOOD(SingleStageDetector):\n    r\"\"\"Implementation of `TOOD: Task-aligned One-stage Object Detection.\n    <https://arxiv.org/abs/2108.07755>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of TOOD. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of TOOD. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/trident_faster_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .faster_rcnn import FasterRCNN\n\n\n@MODELS.register_module()\nclass TridentFasterRCNN(FasterRCNN):\n    \"\"\"Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 rpn_head: ConfigType,\n                 roi_head: ConfigType,\n                 train_cfg: ConfigType,\n                 test_cfg: ConfigType,\n                 neck: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n        assert self.backbone.num_branch == self.roi_head.num_branch\n        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx\n        self.num_branch = self.backbone.num_branch\n        self.test_branch_idx = self.backbone.test_branch_idx\n\n    def _forward(self, batch_inputs: Tensor,\n                 batch_data_samples: SampleList) -> tuple:\n        \"\"\"copy the ``batch_data_samples`` to fit multi-branch.\"\"\"\n        num_branch = self.num_branch \\\n            if self.training or self.test_branch_idx == -1 else 1\n        trident_data_samples = batch_data_samples * num_branch\n        return super()._forward(\n            batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"copy the ``batch_data_samples`` to fit multi-branch.\"\"\"\n        num_branch = self.num_branch \\\n            if self.training or self.test_branch_idx == -1 else 1\n        trident_data_samples = batch_data_samples * num_branch\n        return super().loss(\n            batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"copy the ``batch_data_samples`` to fit multi-branch.\"\"\"\n        num_branch = self.num_branch \\\n            if self.training or self.test_branch_idx == -1 else 1\n        trident_data_samples = batch_data_samples * num_branch\n        return super().predict(\n            batch_inputs=batch_inputs,\n            batch_data_samples=trident_data_samples,\n            rescale=rescale)\n\n    # TODO need to refactor\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        x = self.extract_feats(imgs)\n        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)\n        trident_img_metas = [img_metas * num_branch for img_metas in img_metas]\n        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)\n        return self.roi_head.aug_test(\n            x, proposal_list, img_metas, rescale=rescale)\n"
  },
  {
    "path": "mmdet/models/detectors/two_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom typing import List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .base import BaseDetector\n\n\n@MODELS.register_module()\nclass TwoStageDetector(BaseDetector):\n    \"\"\"Base class for two-stage detectors.\n\n    Two-stage detectors typically consisting of a region proposal network and a\n    task-specific regression head.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: OptConfigType = None,\n                 rpn_head: OptConfigType = None,\n                 roi_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n        self.backbone = MODELS.build(backbone)\n\n        if neck is not None:\n            self.neck = MODELS.build(neck)\n\n        if rpn_head is not None:\n            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n            rpn_head_ = rpn_head.copy()\n            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n            rpn_head_num_classes = rpn_head_.get('num_classes', None)\n            if rpn_head_num_classes is None:\n                rpn_head_.update(num_classes=1)\n            else:\n                if rpn_head_num_classes != 1:\n                    warnings.warn(\n                        'The `num_classes` should be 1 in RPN, but get '\n                        f'{rpn_head_num_classes}, please set '\n                        'rpn_head.num_classes = 1 in your config file.')\n                    rpn_head_.update(num_classes=1)\n            self.rpn_head = MODELS.build(rpn_head_)\n\n        if roi_head is not None:\n            # update train and test cfg here for now\n            # TODO: refactor assigner & sampler\n            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n            roi_head.update(train_cfg=rcnn_train_cfg)\n            roi_head.update(test_cfg=test_cfg.rcnn)\n            self.roi_head = MODELS.build(roi_head)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def _load_from_state_dict(self, state_dict: dict, prefix: str,\n                              local_metadata: dict, strict: bool,\n                              missing_keys: Union[List[str], str],\n                              unexpected_keys: Union[List[str], str],\n                              error_msgs: Union[List[str], str]) -> None:\n        \"\"\"Exchange bbox_head key to rpn_head key when loading single-stage\n        weights into two-stage model.\"\"\"\n        bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'\n        bbox_head_keys = [\n            k for k in state_dict.keys() if k.startswith(bbox_head_prefix)\n        ]\n        rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'\n        rpn_head_keys = [\n            k for k in state_dict.keys() if k.startswith(rpn_head_prefix)\n        ]\n        if len(bbox_head_keys) != 0 and len(rpn_head_keys) == 0:\n            for bbox_head_key in bbox_head_keys:\n                rpn_head_key = rpn_head_prefix + \\\n        
                       bbox_head_key[len(bbox_head_prefix):]\n                state_dict[rpn_head_key] = state_dict.pop(bbox_head_key)\n        super()._load_from_state_dict(state_dict, prefix, local_metadata,\n                                      strict, missing_keys, unexpected_keys,\n                                      error_msgs)\n\n    @property\n    def with_rpn(self) -> bool:\n        \"\"\"bool: whether the detector has RPN\"\"\"\n        return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n    @property\n    def with_roi_head(self) -> bool:\n        \"\"\"bool: whether the detector has a RoI head\"\"\"\n        return hasattr(self, 'roi_head') and self.roi_head is not None\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n        Returns:\n            tuple[Tensor]: Multi-level features that may have\n            different resolutions.\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def _forward(self, batch_inputs: Tensor,\n                 batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple: A tuple of features from ``rpn_head`` and ``roi_head``\n            forward.\n        \"\"\"\n        results = ()\n        x = self.extract_feat(batch_inputs)\n\n        if self.with_rpn:\n            rpn_results_list = self.rpn_head.predict(\n                x, batch_data_samples, rescale=False)\n        else:\n            assert batch_data_samples[0].get('proposals', None) is not None\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n        roi_outs = self.roi_head.forward(x, rpn_results_list,\n                                         batch_data_samples)\n        results = results + (roi_outs, )\n        return results\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (List[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components\n        \"\"\"\n        x = self.extract_feat(batch_inputs)\n\n        losses = dict()\n\n        # RPN forward and loss\n        if self.with_rpn:\n            proposal_cfg = self.train_cfg.get('rpn_proposal',\n                                              self.test_cfg.rpn)\n            rpn_data_samples = copy.deepcopy(batch_data_samples)\n            # set cat_id of gt_labels to 0 in RPN\n            for data_sample in rpn_data_samples:\n                data_sample.gt_instances.labels = \\\n                    torch.zeros_like(data_sample.gt_instances.labels)\n\n            rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(\n                x, rpn_data_samples, proposal_cfg=proposal_cfg)\n            # avoid get same name with roi_head loss\n            keys = rpn_losses.keys()\n            for key in list(keys):\n                if 'loss' in key and 'rpn' not in key:\n                    rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)\n            losses.update(rpn_losses)\n        else:\n            assert batch_data_samples[0].get('proposals', None) is not None\n            # use pre-defined proposals in InstanceData for the second stage\n            # to extract ROI features.\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        roi_losses = self.roi_head.loss(x, rpn_results_list,\n                                        batch_data_samples)\n        losses.update(roi_losses)\n\n        return losses\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Return the detection results of the\n            input images. The returns value is DetDataSample,\n            which usually contain 'pred_instances'. 
And the\n            ``pred_instances`` usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        x = self.extract_feat(batch_inputs)\n\n        # If there are no pre-defined proposals, use RPN to get proposals\n        if batch_data_samples[0].get('proposals', None) is None:\n            rpn_results_list = self.rpn_head.predict(\n                x, batch_data_samples, rescale=False)\n        else:\n            rpn_results_list = [\n                data_sample.proposals for data_sample in batch_data_samples\n            ]\n\n        results_list = self.roi_head.predict(\n            x, rpn_results_list, batch_data_samples, rescale=rescale)\n\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n"
  },
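`TwoStageDetector._load_from_state_dict` above lets a single-stage checkpoint initialize the RPN of a two-stage model by renaming `bbox_head.*` keys to `rpn_head.*` when no `rpn_head.*` keys are present. A small standalone sketch of that remapping (hypothetical helper name, same logic as the method above):

```python
def remap_single_stage_keys(state_dict: dict, prefix: str = '') -> dict:
    """Rename bbox_head.* keys to rpn_head.* if no rpn_head.* keys exist."""
    bbox_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
    rpn_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
    bbox_keys = [k for k in state_dict if k.startswith(bbox_prefix)]
    rpn_keys = [k for k in state_dict if k.startswith(rpn_prefix)]
    if bbox_keys and not rpn_keys:
        for k in bbox_keys:
            state_dict[rpn_prefix + k[len(bbox_prefix):]] = state_dict.pop(k)
    return state_dict


ckpt = {'backbone.conv1.weight': 0, 'bbox_head.conv.weight': 1}
print(remap_single_stage_keys(ckpt))
# {'backbone.conv1.weight': 0, 'rpn_head.conv.weight': 1}
```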
  {
    "path": "mmdet/models/detectors/vfnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass VFNet(SingleStageDetector):\n    \"\"\"Implementation of `VarifocalNet\n    (VFNet).<https://arxiv.org/abs/2008.13367>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of VFNet. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of VFNet. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolact.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@MODELS.register_module()\nclass YOLACT(SingleStageInstanceSegmentor):\n    \"\"\"Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 mask_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass YOLOV3(SingleStageDetector):\n    r\"\"\"Implementation of `Yolov3: An incremental improvement\n    <https://arxiv.org/abs/1804.02767>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of YOLOX. Default: None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of YOLOX. Default: None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional):\n            Model preprocessing config for processing the input data.\n            it usually includes ``to_rgb``, ``pad_size_divisor``,\n            ``pad_value``, ``mean`` and ``std``. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolof.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass YOLOF(SingleStageDetector):\n    r\"\"\"Implementation of `You Only Look One-level Feature\n    <https://arxiv.org/abs/2103.09460>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        neck (:obj:`ConfigDict` or dict): The neck module.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head module.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of YOLOF. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of YOLOF. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional):\n            Model preprocessing config for processing the input data.\n            it usually includes ``to_rgb``, ``pad_size_divisor``,\n            ``pad_value``, ``mean`` and ``std``. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolox.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .single_stage import SingleStageDetector\n\n\n@MODELS.register_module()\nclass YOLOX(SingleStageDetector):\n    r\"\"\"Implementation of `YOLOX: Exceeding YOLO Series in 2021\n    <https://arxiv.org/abs/2107.08430>`_\n\n    Args:\n        backbone (:obj:`ConfigDict` or dict): The backbone config.\n        neck (:obj:`ConfigDict` or dict): The neck config.\n        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.\n        train_cfg (:obj:`ConfigDict` or dict, optional): The training config\n            of YOLOX. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of YOLOX. Defaults to None.\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/layers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .activations import SiLU\nfrom .bbox_nms import fast_nms, multiclass_nms\nfrom .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d\nfrom .conv_upsample import ConvUpsample\nfrom .csp_layer import CSPLayer\nfrom .dropblock import DropBlock\nfrom .ema import ExpMomentumEMA\nfrom .inverted_residual import InvertedResidual\nfrom .matrix_nms import mask_matrix_nms\nfrom .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder\nfrom .normed_predictor import NormedConv2d, NormedLinear\nfrom .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder\nfrom .positional_encoding import (LearnedPositionalEncoding,\n                                  SinePositionalEncoding)\nfrom .res_layer import ResLayer, SimplifiedBasicBlock\nfrom .se_layer import ChannelAttention, DyReLU, SELayer\n# yapf: disable\nfrom .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,\n                          ConditionalAttention,\n                          ConditionalDetrTransformerDecoder,\n                          ConditionalDetrTransformerDecoderLayer,\n                          DABDetrTransformerDecoder,\n                          DABDetrTransformerDecoderLayer,\n                          DABDetrTransformerEncoder,\n                          DeformableDetrTransformerDecoder,\n                          DeformableDetrTransformerDecoderLayer,\n                          DeformableDetrTransformerEncoder,\n                          DeformableDetrTransformerEncoderLayer,\n                          DetrTransformerDecoder, DetrTransformerDecoderLayer,\n                          DetrTransformerEncoder, DetrTransformerEncoderLayer,\n                          DinoTransformerDecoder, DynamicConv,\n                          Mask2FormerTransformerDecoder,\n                          Mask2FormerTransformerDecoderLayer,\n                          Mask2FormerTransformerEncoder, PatchEmbed,\n                          PatchMerging, coordinate_to_encoding,\n                          inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)\n\n# yapf: enable\n\n__all__ = [\n    'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',\n    'PixelDecoder', 'TransformerEncoderPixelDecoder',\n    'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',\n    'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',\n    'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',\n    'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',\n    'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',\n    'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',\n    'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',\n    'DetrTransformerEncoder', 'DetrTransformerDecoder',\n    'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',\n    'DeformableDetrTransformerEncoderLayer',\n    'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',\n    'coordinate_to_encoding', 'ConditionalAttention',\n    'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',\n    'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',\n    'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',\n    'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',\n    'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'\n]\n"
  },
  {
    "path": "mmdet/models/layers/activations.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmengine.utils import digit_version\n\nfrom mmdet.registry import MODELS\n\nif digit_version(torch.__version__) >= digit_version('1.7.0'):\n    from torch.nn import SiLU\nelse:\n\n    class SiLU(nn.Module):\n        \"\"\"Sigmoid Weighted Liner Unit.\"\"\"\n\n        def __init__(self, inplace=True):\n            super().__init__()\n\n        def forward(self, inputs) -> torch.Tensor:\n            return inputs * torch.sigmoid(inputs)\n\n\nMODELS.register_module(module=SiLU, name='SiLU')\n"
  },
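The fallback `SiLU` in mmdet/models/layers/activations.py above only exists for PyTorch builds older than 1.7; on newer versions the built-in `torch.nn.SiLU` is re-exported. A quick sanity check (assuming torch >= 1.7) that both compute `x * sigmoid(x)`:

```python
import torch
import torch.nn as nn

x = torch.randn(4, 8)
# the built-in SiLU and the explicit formula used by the fallback agree
assert torch.allclose(nn.SiLU()(x), x * torch.sigmoid(x), atol=1e-6)
```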
  {
    "path": "mmdet/models/layers/bbox_nms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport torch\nfrom mmcv.ops.nms import batched_nms\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom mmdet.utils import ConfigType\n\n\ndef multiclass_nms(\n    multi_bboxes: Tensor,\n    multi_scores: Tensor,\n    score_thr: float,\n    nms_cfg: ConfigType,\n    max_num: int = -1,\n    score_factors: Optional[Tensor] = None,\n    return_inds: bool = False,\n    box_dim: int = 4\n) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:\n    \"\"\"NMS for multi-class bboxes.\n\n    Args:\n        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n        multi_scores (Tensor): shape (n, #class), where the last column\n            contains scores of the background class, but this will be ignored.\n        score_thr (float): bbox threshold, bboxes with scores lower than it\n            will not be considered.\n        nms_cfg (Union[:obj:`ConfigDict`, dict]): a dict that contains\n            the arguments of nms operations.\n        max_num (int, optional): if there are more than max_num bboxes after\n            NMS, only top max_num will be kept. Default to -1.\n        score_factors (Tensor, optional): The factors multiplied to scores\n            before applying NMS. Default to None.\n        return_inds (bool, optional): Whether return the indices of kept\n            bboxes. Default to False.\n        box_dim (int): The dimension of boxes. Defaults to 4.\n\n    Returns:\n        Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:\n            (dets, labels, indices (optional)), tensors of shape (k, 5),\n            (k), and (k). Dets are boxes with scores. Labels are 0-based.\n    \"\"\"\n    num_classes = multi_scores.size(1) - 1\n    # exclude background category\n    if multi_bboxes.shape[1] > box_dim:\n        bboxes = multi_bboxes.view(multi_scores.size(0), -1, box_dim)\n    else:\n        bboxes = multi_bboxes[:, None].expand(\n            multi_scores.size(0), num_classes, box_dim)\n\n    scores = multi_scores[:, :-1]\n\n    labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)\n    labels = labels.view(1, -1).expand_as(scores)\n\n    bboxes = bboxes.reshape(-1, box_dim)\n    scores = scores.reshape(-1)\n    labels = labels.reshape(-1)\n\n    if not torch.onnx.is_in_onnx_export():\n        # NonZero not supported  in TensorRT\n        # remove low scoring boxes\n        valid_mask = scores > score_thr\n    # multiply score_factor after threshold to preserve more bboxes, improve\n    # mAP by 1% for YOLOv3\n    if score_factors is not None:\n        # expand the shape to match original shape of score\n        score_factors = score_factors.view(-1, 1).expand(\n            multi_scores.size(0), num_classes)\n        score_factors = score_factors.reshape(-1)\n        scores = scores * score_factors\n\n    if not torch.onnx.is_in_onnx_export():\n        # NonZero not supported  in TensorRT\n        inds = valid_mask.nonzero(as_tuple=False).squeeze(1)\n        bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]\n    else:\n        # TensorRT NMS plugin has invalid output filled with -1\n        # add dummy data to make detection output correct.\n        bboxes = torch.cat([bboxes, bboxes.new_zeros(1, box_dim)], dim=0)\n        scores = torch.cat([scores, scores.new_zeros(1)], dim=0)\n        labels = torch.cat([labels, labels.new_zeros(1)], dim=0)\n\n    if bboxes.numel() == 0:\n        if 
torch.onnx.is_in_onnx_export():\n            raise RuntimeError('[ONNX Error] Can not record NMS '\n                               'as it has not been executed this time')\n        dets = torch.cat([bboxes, scores[:, None]], -1)\n        if return_inds:\n            return dets, labels, inds\n        else:\n            return dets, labels\n\n    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)\n\n    if max_num > 0:\n        dets = dets[:max_num]\n        keep = keep[:max_num]\n\n    if return_inds:\n        return dets, labels[keep], inds[keep]\n    else:\n        return dets, labels[keep]\n\n\ndef fast_nms(\n    multi_bboxes: Tensor,\n    multi_scores: Tensor,\n    multi_coeffs: Tensor,\n    score_thr: float,\n    iou_thr: float,\n    top_k: int,\n    max_num: int = -1\n) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:\n    \"\"\"Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.\n\n    Fast NMS allows already-removed detections to suppress other detections so\n    that every instance can be decided to be kept or discarded in parallel,\n    which is not possible in traditional NMS. This relaxation allows us to\n    implement Fast NMS entirely in standard GPU-accelerated matrix operations.\n\n    Args:\n        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n        multi_scores (Tensor): shape (n, #class+1), where the last column\n            contains scores of the background class, but this will be ignored.\n        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).\n        score_thr (float): bbox threshold, bboxes with scores lower than it\n            will not be considered.\n        iou_thr (float): IoU threshold to be considered as conflicted.\n        top_k (int): if there are more than top_k bboxes before NMS,\n            only top top_k will be kept.\n        max_num (int): if there are more than max_num bboxes after NMS,\n            only top max_num will be kept. If -1, keep all the bboxes.\n            Default: -1.\n\n    Returns:\n        Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:\n            (dets, labels, coefficients), tensors of shape (k, 5), (k, 1),\n            and (k, coeffs_dim). 
Dets are boxes with scores.\n            Labels are 0-based.\n    \"\"\"\n\n    scores = multi_scores[:, :-1].t()  # [#class, n]\n    scores, idx = scores.sort(1, descending=True)\n\n    idx = idx[:, :top_k].contiguous()\n    scores = scores[:, :top_k]  # [#class, topk]\n    num_classes, num_dets = idx.size()\n    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)\n    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)\n\n    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]\n    iou.triu_(diagonal=1)\n    iou_max, _ = iou.max(dim=1)\n\n    # Now just filter out the ones higher than the threshold\n    keep = iou_max <= iou_thr\n\n    # Second thresholding introduces 0.2 mAP gain at negligible time cost\n    keep *= scores > score_thr\n\n    # Assign each kept detection to its corresponding class\n    classes = torch.arange(\n        num_classes, device=boxes.device)[:, None].expand_as(keep)\n    classes = classes[keep]\n\n    boxes = boxes[keep]\n    coeffs = coeffs[keep]\n    scores = scores[keep]\n\n    # Only keep the top max_num highest scores across all classes\n    scores, idx = scores.sort(0, descending=True)\n    if max_num > 0:\n        idx = idx[:max_num]\n        scores = scores[:max_num]\n\n    classes = classes[idx]\n    boxes = boxes[idx]\n    coeffs = coeffs[idx]\n\n    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)\n    return cls_dets, classes, coeffs\n"
  },
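A minimal usage sketch of `multiclass_nms` from mmdet/models/layers/bbox_nms.py above with toy tensors. It assumes mmdet and mmcv are installed and that `dict(type='nms', iou_threshold=0.5)` is an acceptable `nms_cfg` for `batched_nms`, which is the usual mmdet convention:

```python
import torch
from mmdet.models.layers import multiclass_nms

num_boxes, num_classes = 100, 3
bboxes = torch.rand(num_boxes, 4) * 100
bboxes[:, 2:] += bboxes[:, :2]                   # ensure x2 >= x1, y2 >= y1
scores = torch.rand(num_boxes, num_classes + 1)  # last column is background

dets, labels = multiclass_nms(
    bboxes,                                      # (n, 4) class-agnostic boxes
    scores,                                      # (n, #class + 1)
    score_thr=0.05,
    nms_cfg=dict(type='nms', iou_threshold=0.5),
    max_num=10)
# dets: (k, 5) boxes with scores, labels: (k,) 0-based class indices
```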
  {
    "path": "mmdet/models/layers/brick_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version\n\nif torch.__version__ == 'parrots':\n    TORCH_VERSION = torch.__version__\nelse:\n    # torch.__version__ could be 1.3.1+cu92, we only need the first two\n    # for comparison\n    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])\n\n\ndef adaptive_avg_pool2d(input, output_size):\n    \"\"\"Handle empty batch dimension to adaptive_avg_pool2d.\n\n    Args:\n        input (tensor): 4D tensor.\n        output_size (int, tuple[int,int]): the target output size.\n    \"\"\"\n    if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):\n        if isinstance(output_size, int):\n            output_size = [output_size, output_size]\n        output_size = [*input.shape[:2], *output_size]\n        empty = NewEmptyTensorOp.apply(input, output_size)\n        return empty\n    else:\n        return F.adaptive_avg_pool2d(input, output_size)\n\n\nclass AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):\n    \"\"\"Handle empty batch dimension to AdaptiveAvgPool2d.\"\"\"\n\n    def forward(self, x):\n        # PyTorch 1.9 does not support empty tensor inference yet\n        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):\n            output_size = self.output_size\n            if isinstance(output_size, int):\n                output_size = [output_size, output_size]\n            else:\n                output_size = [\n                    v if v is not None else d\n                    for v, d in zip(output_size,\n                                    x.size()[-2:])\n                ]\n            output_size = [*x.shape[:2], *output_size]\n            empty = NewEmptyTensorOp.apply(x, output_size)\n            return empty\n\n        return super().forward(x)\n"
  },
  {
    "path": "mmdet/models/layers/conv_upsample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule, ModuleList\n\n\nclass ConvUpsample(BaseModule):\n    \"\"\"ConvUpsample performs 2x upsampling after Conv.\n\n    There are several `ConvModule` layers. In the first few layers, upsampling\n    will be applied after each layer of convolution. The number of upsampling\n    must be no more than the number of ConvModule layers.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        inner_channels (int): Number of channels produced by the convolution.\n        num_layers (int): Number of convolution layers.\n        num_upsample (int | optional): Number of upsampling layer. Must be no\n            more than num_layers. Upsampling will be applied after the first\n            ``num_upsample`` layers of convolution. Default: ``num_layers``.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        init_cfg (dict): Config dict for initialization. Default: None.\n        kwargs (key word augments): Other augments used in ConvModule.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 inner_channels,\n                 num_layers=1,\n                 num_upsample=None,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(ConvUpsample, self).__init__(init_cfg)\n        if num_upsample is None:\n            num_upsample = num_layers\n        assert num_upsample <= num_layers, \\\n            f'num_upsample({num_upsample})must be no more than ' \\\n            f'num_layers({num_layers})'\n        self.num_layers = num_layers\n        self.num_upsample = num_upsample\n        self.conv = ModuleList()\n        for i in range(num_layers):\n            self.conv.append(\n                ConvModule(\n                    in_channels,\n                    inner_channels,\n                    3,\n                    padding=1,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n            in_channels = inner_channels\n\n    def forward(self, x):\n        num_upsample = self.num_upsample\n        for i in range(self.num_layers):\n            x = self.conv[i](x)\n            if num_upsample > 0:\n                num_upsample -= 1\n                x = F.interpolate(\n                    x, scale_factor=2, mode='bilinear', align_corners=False)\n        return x\n"
  },
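A short usage sketch of `ConvUpsample` from mmdet/models/layers/conv_upsample.py above, with hypothetical channel sizes (requires mmcv for `ConvModule`): three conv layers with 2x bilinear upsampling after the first two, so the spatial size grows by `2 ** num_upsample`:

```python
import torch
from mmdet.models.layers import ConvUpsample

m = ConvUpsample(
    in_channels=256,
    inner_channels=128,
    num_layers=3,
    num_upsample=2)

x = torch.rand(1, 256, 16, 16)
y = m(x)
print(y.shape)  # torch.Size([1, 128, 64, 64])
```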
  {
    "path": "mmdet/models/layers/csp_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom .se_layer import ChannelAttention\n\n\nclass DarknetBottleneck(BaseModule):\n    \"\"\"The basic bottleneck block used in Darknet.\n\n    Each ResBlock consists of two ConvModules and the input is added to the\n    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.\n    The first convLayer has filter size of 1x1 and the second one has the\n    filter size of 3x3.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        expansion (float): The kernel size of the convolution.\n            Defaults to 0.5.\n        add_identity (bool): Whether to add identity to the out.\n            Defaults to True.\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Defaults to False.\n        conv_cfg (dict): Config dict for convolution layer. Defaults to None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to dict(type='Swish').\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 expansion: float = 0.5,\n                 add_identity: bool = True,\n                 use_depthwise: bool = False,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='BN', momentum=0.03, eps=0.001),\n                 act_cfg: ConfigType = dict(type='Swish'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        hidden_channels = int(out_channels * expansion)\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n        self.conv1 = ConvModule(\n            in_channels,\n            hidden_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.conv2 = conv(\n            hidden_channels,\n            out_channels,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.add_identity = \\\n            add_identity and in_channels == out_channels\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        identity = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n\n        if self.add_identity:\n            return out + identity\n        else:\n            return out\n\n\nclass CSPNeXtBlock(BaseModule):\n    \"\"\"The basic bottleneck block used in CSPNeXt.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        expansion (float): Expand ratio of the hidden channel. Defaults to 0.5.\n        add_identity (bool): Whether to add identity to the out. Only works\n            when in_channels == out_channels. 
Defaults to True.\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Defaults to False.\n        kernel_size (int): The kernel size of the second convolution layer.\n            Defaults to 5.\n        conv_cfg (dict): Config dict for convolution layer. Defaults to None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to dict(type='SiLU').\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 expansion: float = 0.5,\n                 add_identity: bool = True,\n                 use_depthwise: bool = False,\n                 kernel_size: int = 5,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='BN', momentum=0.03, eps=0.001),\n                 act_cfg: ConfigType = dict(type='SiLU'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        hidden_channels = int(out_channels * expansion)\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n        self.conv1 = conv(\n            in_channels,\n            hidden_channels,\n            3,\n            stride=1,\n            padding=1,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.conv2 = DepthwiseSeparableConvModule(\n            hidden_channels,\n            out_channels,\n            kernel_size,\n            stride=1,\n            padding=kernel_size // 2,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.add_identity = \\\n            add_identity and in_channels == out_channels\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        identity = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n\n        if self.add_identity:\n            return out + identity\n        else:\n            return out\n\n\nclass CSPLayer(BaseModule):\n    \"\"\"Cross Stage Partial Layer.\n\n    Args:\n        in_channels (int): The input channels of the CSP layer.\n        out_channels (int): The output channels of the CSP layer.\n        expand_ratio (float): Ratio to adjust the number of channels of the\n            hidden layer. Defaults to 0.5.\n        num_blocks (int): Number of blocks. Defaults to 1.\n        add_identity (bool): Whether to add identity in blocks.\n            Defaults to True.\n        use_cspnext_block (bool): Whether to use CSPNeXt block.\n            Defaults to False.\n        use_depthwise (bool): Whether to use depthwise separable convolution in\n            blocks. Defaults to False.\n        channel_attention (bool): Whether to add channel attention in each\n            stage. 
Defaults to False.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to dict(type='Swish').\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 expand_ratio: float = 0.5,\n                 num_blocks: int = 1,\n                 add_identity: bool = True,\n                 use_depthwise: bool = False,\n                 use_cspnext_block: bool = False,\n                 channel_attention: bool = False,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='BN', momentum=0.03, eps=0.001),\n                 act_cfg: ConfigType = dict(type='Swish'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        block = CSPNeXtBlock if use_cspnext_block else DarknetBottleneck\n        mid_channels = int(out_channels * expand_ratio)\n        self.channel_attention = channel_attention\n        self.main_conv = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.short_conv = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.final_conv = ConvModule(\n            2 * mid_channels,\n            out_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        self.blocks = nn.Sequential(*[\n            block(\n                mid_channels,\n                mid_channels,\n                1.0,\n                add_identity,\n                use_depthwise,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg) for _ in range(num_blocks)\n        ])\n        if channel_attention:\n            self.attention = ChannelAttention(2 * mid_channels)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        x_short = self.short_conv(x)\n\n        x_main = self.main_conv(x)\n        x_main = self.blocks(x_main)\n\n        x_final = torch.cat((x_main, x_short), dim=1)\n\n        if self.channel_attention:\n            x_final = self.attention(x_final)\n        return self.final_conv(x_final)\n"
  },
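As a concrete illustration of the layer above (a sketch, not library documentation): `CSPLayer` splits the input into a main branch of stacked bottleneck blocks and a 1x1 shortcut branch, concatenates the two, optionally applies channel attention, and fuses the result with a final 1x1 conv, so only the channel count changes.

```python
import torch

from mmdet.models.layers.csp_layer import CSPLayer

layer = CSPLayer(
    in_channels=64,
    out_channels=128,
    expand_ratio=0.5,
    num_blocks=2,
    use_cspnext_block=True,  # use CSPNeXtBlock instead of DarknetBottleneck
    channel_attention=True)

x = torch.rand(2, 64, 40, 40)
assert layer(x).shape == (2, 128, 40, 40)  # spatial size is preserved
```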
  {
    "path": "mmdet/models/layers/dropblock.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\n\neps = 1e-6\n\n\n@MODELS.register_module()\nclass DropBlock(nn.Module):\n    \"\"\"Randomly drop some regions of feature maps.\n\n     Please refer to the method proposed in `DropBlock\n     <https://arxiv.org/abs/1810.12890>`_ for details.\n\n    Args:\n        drop_prob (float): The probability of dropping each block.\n        block_size (int): The size of dropped blocks.\n        warmup_iters (int): The drop probability will linearly increase\n            from `0` to `drop_prob` during the first `warmup_iters` iterations.\n            Default: 2000.\n    \"\"\"\n\n    def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):\n        super(DropBlock, self).__init__()\n        assert block_size % 2 == 1\n        assert 0 < drop_prob <= 1\n        assert warmup_iters >= 0\n        self.drop_prob = drop_prob\n        self.block_size = block_size\n        self.warmup_iters = warmup_iters\n        self.iter_cnt = 0\n\n    def forward(self, x):\n        \"\"\"\n        Args:\n            x (Tensor): Input feature map on which some areas will be randomly\n                dropped.\n\n        Returns:\n            Tensor: The tensor after DropBlock layer.\n        \"\"\"\n        if not self.training:\n            return x\n        self.iter_cnt += 1\n        N, C, H, W = list(x.shape)\n        gamma = self._compute_gamma((H, W))\n        mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)\n        mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))\n\n        mask = F.pad(mask, [self.block_size // 2] * 4, value=0)\n        mask = F.max_pool2d(\n            input=mask,\n            stride=(1, 1),\n            kernel_size=(self.block_size, self.block_size),\n            padding=self.block_size // 2)\n        mask = 1 - mask\n        x = x * mask * mask.numel() / (eps + mask.sum())\n        return x\n\n    def _compute_gamma(self, feat_size):\n        \"\"\"Compute the value of gamma according to paper. gamma is the\n        parameter of bernoulli distribution, which controls the number of\n        features to drop.\n\n        gamma = (drop_prob * fm_area) / (drop_area * keep_area)\n\n        Args:\n            feat_size (tuple[int, int]): The height and width of feature map.\n\n        Returns:\n            float: The value of gamma.\n        \"\"\"\n        gamma = (self.drop_prob * feat_size[0] * feat_size[1])\n        gamma /= ((feat_size[0] - self.block_size + 1) *\n                  (feat_size[1] - self.block_size + 1))\n        gamma /= (self.block_size**2)\n        factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /\n                  self.warmup_iters)\n        return gamma * factor\n\n    def extra_repr(self):\n        return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '\n                f'warmup_iters={self.warmup_iters}')\n"
  },
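A short sketch of how the `DropBlock` module above behaves in training versus evaluation mode; note that the drop probability is warmed up linearly over the first `warmup_iters` forward passes.

```python
import torch

from mmdet.models.layers.dropblock import DropBlock

drop = DropBlock(drop_prob=0.1, block_size=3, warmup_iters=100)
x = torch.rand(2, 16, 32, 32)

drop.train()
out = drop(x)                   # 3x3 regions are zeroed, the rest is rescaled
assert out.shape == x.shape

drop.eval()
assert torch.equal(drop(x), x)  # identity at inference time
```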
  {
    "path": "mmdet/models/layers/ema.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.model import ExponentialMovingAverage\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass ExpMomentumEMA(ExponentialMovingAverage):\n    \"\"\"Exponential moving average (EMA) with exponential momentum strategy,\n    which is used in YOLOX.\n\n    Args:\n        model (nn.Module): The model to be averaged.\n        momentum (float): The momentum used for updating ema parameter.\n            Ema's parameter are updated with the formula:\n           `averaged_param = (1-momentum) * averaged_param + momentum *\n           source_param`. Defaults to 0.0002.\n        gamma (int): Use a larger momentum early in training and gradually\n            annealing to a smaller value to update the ema model smoothly. The\n            momentum is calculated as\n            `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.\n            Defaults to 2000.\n        interval (int): Interval between two updates. Defaults to 1.\n        device (torch.device, optional): If provided, the averaged model will\n            be stored on the :attr:`device`. Defaults to None.\n        update_buffers (bool): if True, it will compute running averages for\n            both the parameters and the buffers of the model. Defaults to\n            False.\n    \"\"\"\n\n    def __init__(self,\n                 model: nn.Module,\n                 momentum: float = 0.0002,\n                 gamma: int = 2000,\n                 interval=1,\n                 device: Optional[torch.device] = None,\n                 update_buffers: bool = False) -> None:\n        super().__init__(\n            model=model,\n            momentum=momentum,\n            interval=interval,\n            device=device,\n            update_buffers=update_buffers)\n        assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'\n        self.gamma = gamma\n\n    def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n                 steps: int) -> None:\n        \"\"\"Compute the moving average of the parameters using the exponential\n        momentum strategy.\n\n        Args:\n            averaged_param (Tensor): The averaged parameters.\n            source_param (Tensor): The source parameters.\n            steps (int): The number of times the parameters have been\n                updated.\n        \"\"\"\n        momentum = (1 - self.momentum) * math.exp(\n            -float(1 + steps) / self.gamma) + self.momentum\n        averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)\n"
  },
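The exponential momentum schedule used in `avg_func` above can be traced with a few values (a plain-Python sketch of the same formula): the effective momentum starts near 1, so the averaged model initially tracks the source model almost exactly, and it anneals towards the base momentum of 0.0002 as `steps` grows.

```python
import math

base_momentum, gamma = 0.0002, 2000
for steps in (0, 500, 2000, 10000):
    momentum = (1 - base_momentum) * math.exp(
        -float(1 + steps) / gamma) + base_momentum
    print(steps, momentum)
# roughly 0.999, 0.778, 0.368 and 0.007 respectively
```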
  {
    "path": "mmdet/models/layers/inverted_residual.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import ConvModule\nfrom mmcv.cnn.bricks import DropPath\nfrom mmengine.model import BaseModule\n\nfrom .se_layer import SELayer\n\n\nclass InvertedResidual(BaseModule):\n    \"\"\"Inverted Residual Block.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        mid_channels (int): The input channels of the depthwise convolution.\n        kernel_size (int): The kernel size of the depthwise convolution.\n            Default: 3.\n        stride (int): The stride of the depthwise convolution. Default: 1.\n        se_cfg (dict): Config dict for se layer. Default: None, which means no\n            se layer.\n        with_expand_conv (bool): Use expand conv or not. If set False,\n            mid_channels must be the same with in_channels.\n            Default: True.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU').\n        drop_path_rate (float): stochastic depth rate. Defaults to 0.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Returns:\n        Tensor: The output tensor.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 mid_channels,\n                 kernel_size=3,\n                 stride=1,\n                 se_cfg=None,\n                 with_expand_conv=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU'),\n                 drop_path_rate=0.,\n                 with_cp=False,\n                 init_cfg=None):\n        super(InvertedResidual, self).__init__(init_cfg)\n        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)\n        assert stride in [1, 2], f'stride must in [1, 2]. 
' \\\n            f'But received {stride}.'\n        self.with_cp = with_cp\n        self.drop_path = DropPath(\n            drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n        self.with_se = se_cfg is not None\n        self.with_expand_conv = with_expand_conv\n\n        if self.with_se:\n            assert isinstance(se_cfg, dict)\n        if not self.with_expand_conv:\n            assert mid_channels == in_channels\n\n        if self.with_expand_conv:\n            self.expand_conv = ConvModule(\n                in_channels=in_channels,\n                out_channels=mid_channels,\n                kernel_size=1,\n                stride=1,\n                padding=0,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n        self.depthwise_conv = ConvModule(\n            in_channels=mid_channels,\n            out_channels=mid_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=kernel_size // 2,\n            groups=mid_channels,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        if self.with_se:\n            self.se = SELayer(**se_cfg)\n\n        self.linear_conv = ConvModule(\n            in_channels=mid_channels,\n            out_channels=out_channels,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            out = x\n\n            if self.with_expand_conv:\n                out = self.expand_conv(out)\n\n            out = self.depthwise_conv(out)\n\n            if self.with_se:\n                out = self.se(out)\n\n            out = self.linear_conv(out)\n\n            if self.with_res_shortcut:\n                return x + self.drop_path(out)\n            else:\n                return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        return out\n"
  },
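A minimal sketch of the block above without an SE layer (`se_cfg=None`); the residual shortcut is only used when `stride == 1` and the input and output channels match, which is not the case here.

```python
import torch

from mmdet.models.layers.inverted_residual import InvertedResidual

block = InvertedResidual(
    in_channels=32,
    out_channels=64,
    mid_channels=96,
    kernel_size=3,
    stride=2)

x = torch.rand(2, 32, 56, 56)
assert block(x).shape == (2, 64, 28, 28)  # downsampled by the depthwise conv
```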
  {
    "path": "mmdet/models/layers/matrix_nms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef mask_matrix_nms(masks,\n                    labels,\n                    scores,\n                    filter_thr=-1,\n                    nms_pre=-1,\n                    max_num=-1,\n                    kernel='gaussian',\n                    sigma=2.0,\n                    mask_area=None):\n    \"\"\"Matrix NMS for multi-class masks.\n\n    Args:\n        masks (Tensor): Has shape (num_instances, h, w)\n        labels (Tensor): Labels of corresponding masks,\n            has shape (num_instances,).\n        scores (Tensor): Mask scores of corresponding masks,\n            has shape (num_instances).\n        filter_thr (float): Score threshold to filter the masks\n            after matrix nms. Default: -1, which means do not\n            use filter_thr.\n        nms_pre (int): The max number of instances to do the matrix nms.\n            Default: -1, which means do not use nms_pre.\n        max_num (int, optional): If there are more than max_num masks after\n            matrix, only top max_num will be kept. Default: -1, which means\n            do not use max_num.\n        kernel (str): 'linear' or 'gaussian'.\n        sigma (float): std in gaussian method.\n        mask_area (Tensor): The sum of seg_masks.\n\n    Returns:\n        tuple(Tensor): Processed mask results.\n\n            - scores (Tensor): Updated scores, has shape (n,).\n            - labels (Tensor): Remained labels, has shape (n,).\n            - masks (Tensor): Remained masks, has shape (n, w, h).\n            - keep_inds (Tensor): The indices number of\n                the remaining mask in the input mask, has shape (n,).\n    \"\"\"\n    assert len(labels) == len(masks) == len(scores)\n    if len(labels) == 0:\n        return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(\n            0, *masks.shape[-2:]), labels.new_zeros(0)\n    if mask_area is None:\n        mask_area = masks.sum((1, 2)).float()\n    else:\n        assert len(masks) == len(mask_area)\n\n    # sort and keep top nms_pre\n    scores, sort_inds = torch.sort(scores, descending=True)\n\n    keep_inds = sort_inds\n    if nms_pre > 0 and len(sort_inds) > nms_pre:\n        sort_inds = sort_inds[:nms_pre]\n        keep_inds = keep_inds[:nms_pre]\n        scores = scores[:nms_pre]\n    masks = masks[sort_inds]\n    mask_area = mask_area[sort_inds]\n    labels = labels[sort_inds]\n\n    num_masks = len(labels)\n    flatten_masks = masks.reshape(num_masks, -1).float()\n    # inter.\n    inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))\n    expanded_mask_area = mask_area.expand(num_masks, num_masks)\n    # Upper triangle iou matrix.\n    iou_matrix = (inter_matrix /\n                  (expanded_mask_area + expanded_mask_area.transpose(1, 0) -\n                   inter_matrix)).triu(diagonal=1)\n    # label_specific matrix.\n    expanded_labels = labels.expand(num_masks, num_masks)\n    # Upper triangle label matrix.\n    label_matrix = (expanded_labels == expanded_labels.transpose(\n        1, 0)).triu(diagonal=1)\n\n    # IoU compensation\n    compensate_iou, _ = (iou_matrix * label_matrix).max(0)\n    compensate_iou = compensate_iou.expand(num_masks,\n                                           num_masks).transpose(1, 0)\n\n    # IoU decay\n    decay_iou = iou_matrix * label_matrix\n\n    # Calculate the decay_coefficient\n    if kernel == 'gaussian':\n        decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))\n        compensate_matrix = 
torch.exp(-1 * sigma * (compensate_iou**2))\n        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)\n    elif kernel == 'linear':\n        decay_matrix = (1 - decay_iou) / (1 - compensate_iou)\n        decay_coefficient, _ = decay_matrix.min(0)\n    else:\n        raise NotImplementedError(\n            f'{kernel} kernel is not supported in matrix nms!')\n    # update the score.\n    scores = scores * decay_coefficient\n\n    if filter_thr > 0:\n        keep = scores >= filter_thr\n        keep_inds = keep_inds[keep]\n        if not keep.any():\n            return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(\n                0, *masks.shape[-2:]), labels.new_zeros(0)\n        masks = masks[keep]\n        scores = scores[keep]\n        labels = labels[keep]\n\n    # sort and keep top max_num\n    scores, sort_inds = torch.sort(scores, descending=True)\n    keep_inds = keep_inds[sort_inds]\n    if max_num > 0 and len(sort_inds) > max_num:\n        sort_inds = sort_inds[:max_num]\n        keep_inds = keep_inds[:max_num]\n        scores = scores[:max_num]\n    masks = masks[sort_inds]\n    labels = labels[sort_inds]\n\n    return scores, labels, masks, keep_inds\n"
  },
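A small sketch of calling `mask_matrix_nms` on random instance masks: instead of hard suppression, each score is decayed by overlapping higher-scoring masks of the same class, and `keep_inds` indexes the kept instances in the original ordering.

```python
import torch

from mmdet.models.layers.matrix_nms import mask_matrix_nms

masks = torch.rand(8, 64, 64) > 0.5          # 8 binary instance masks
labels = torch.randint(0, 3, (8, ))
scores = torch.rand(8)

scores_out, labels_out, masks_out, keep_inds = mask_matrix_nms(
    masks, labels, scores, kernel='gaussian', sigma=2.0, filter_thr=0.05)
assert masks_out.shape[0] == keep_inds.numel() == scores_out.numel()
```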
  {
    "path": "mmdet/models/layers/msdeformattn_pixel_decoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, ConvModule\nfrom mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention\nfrom mmengine.model import (BaseModule, ModuleList, caffe2_xavier_init,\n                            normal_init, xavier_init)\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptMultiConfig\nfrom ..task_modules.prior_generators import MlvlPointGenerator\nfrom .positional_encoding import SinePositionalEncoding\nfrom .transformer import Mask2FormerTransformerEncoder\n\n\n@MODELS.register_module()\nclass MSDeformAttnPixelDecoder(BaseModule):\n    \"\"\"Pixel decoder with multi-scale deformable attention.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        strides (list[int] | tuple[int]): Output strides of feature from\n            backbone.\n        feat_channels (int): Number of channels for feature.\n        out_channels (int): Number of channels for output.\n        num_outs (int): Number of output scales.\n        norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`ConfigDict` or dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`ConfigDict` or dict): Config for transformer\n            encoder. Defaults to None.\n        positional_encoding (:obj:`ConfigDict` or dict): Config for\n            transformer encoder position encoding. Defaults to\n            dict(num_feats=128, normalize=True).\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: Union[List[int],\n                                    Tuple[int]] = [256, 512, 1024, 2048],\n                 strides: Union[List[int], Tuple[int]] = [4, 8, 16, 32],\n                 feat_channels: int = 256,\n                 out_channels: int = 256,\n                 num_outs: int = 3,\n                 norm_cfg: ConfigType = dict(type='GN', num_groups=32),\n                 act_cfg: ConfigType = dict(type='ReLU'),\n                 encoder: ConfigType = None,\n                 positional_encoding: ConfigType = dict(\n                     num_feats=128, normalize=True),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.strides = strides\n        self.num_input_levels = len(in_channels)\n        self.num_encoder_levels = \\\n            encoder.layer_cfg.self_attn_cfg.num_levels\n        assert self.num_encoder_levels >= 1, \\\n            'num_levels in attn_cfgs must be at least one'\n        input_conv_list = []\n        # from top to down (low to high resolution)\n        for i in range(self.num_input_levels - 1,\n                       self.num_input_levels - self.num_encoder_levels - 1,\n                       -1):\n            input_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                norm_cfg=norm_cfg,\n                act_cfg=None,\n                bias=True)\n            input_conv_list.append(input_conv)\n        self.input_convs = ModuleList(input_conv_list)\n\n        self.encoder = Mask2FormerTransformerEncoder(**encoder)\n        self.postional_encoding = SinePositionalEncoding(**positional_encoding)\n        # high resolution to low resolution\n        self.level_encoding = nn.Embedding(self.num_encoder_levels,\n                                           feat_channels)\n\n        # fpn-like structure\n        self.lateral_convs = ModuleList()\n        self.output_convs = ModuleList()\n        self.use_bias = norm_cfg is None\n        # from top to down (low to high resolution)\n        # fpn for the rest features that didn't pass in encoder\n        for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,\n                       -1):\n            lateral_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            output_conv = ConvModule(\n                feat_channels,\n                feat_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.lateral_convs.append(lateral_conv)\n            self.output_convs.append(output_conv)\n\n        self.mask_feature = Conv2d(\n            feat_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n        self.num_outs = num_outs\n        self.point_generator = MlvlPointGenerator(strides)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_encoder_levels):\n            xavier_init(\n                self.input_convs[i].conv,\n                gain=1,\n                bias=0,\n                distribution='uniform')\n\n        for i in range(0, self.num_input_levels - 
self.num_encoder_levels):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n\n        normal_init(self.level_encoding, mean=0, std=1)\n        for p in self.encoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_normal_(p)\n\n        # init_weights defined in MultiScaleDeformableAttention\n        for m in self.encoder.layers.modules():\n            if isinstance(m, MultiScaleDeformableAttention):\n                m.init_weights()\n\n    def forward(self, feats: List[Tensor]) -> Tuple[Tensor, Tensor]:\n        \"\"\"\n        Args:\n            feats (list[Tensor]): Feature maps of each level. Each has\n                shape of (batch_size, c, h, w).\n\n        Returns:\n            tuple: A tuple containing the following:\n\n                - mask_feature (Tensor): shape (batch_size, c, h, w).\n                - multi_scale_features (list[Tensor]): Multi scale \\\n                        features, each in shape (batch_size, c, h, w).\n        \"\"\"\n        # generate padding mask for each level, for each image\n        batch_size = feats[0].shape[0]\n        encoder_input_list = []\n        padding_mask_list = []\n        level_positional_encoding_list = []\n        spatial_shapes = []\n        reference_points_list = []\n        for i in range(self.num_encoder_levels):\n            level_idx = self.num_input_levels - i - 1\n            feat = feats[level_idx]\n            feat_projected = self.input_convs[i](feat)\n            h, w = feat.shape[-2:]\n\n            # no padding\n            padding_mask_resized = feat.new_zeros(\n                (batch_size, ) + feat.shape[-2:], dtype=torch.bool)\n            pos_embed = self.postional_encoding(padding_mask_resized)\n            level_embed = self.level_encoding.weight[i]\n            level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed\n            # (h_i * w_i, 2)\n            reference_points = self.point_generator.single_level_grid_priors(\n                feat.shape[-2:], level_idx, device=feat.device)\n            # normalize\n            factor = feat.new_tensor([[w, h]]) * self.strides[level_idx]\n            reference_points = reference_points / factor\n\n            # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c)\n            feat_projected = feat_projected.flatten(2).permute(0, 2, 1)\n            level_pos_embed = level_pos_embed.flatten(2).permute(0, 2, 1)\n            padding_mask_resized = padding_mask_resized.flatten(1)\n\n            encoder_input_list.append(feat_projected)\n            padding_mask_list.append(padding_mask_resized)\n            level_positional_encoding_list.append(level_pos_embed)\n            spatial_shapes.append(feat.shape[-2:])\n            reference_points_list.append(reference_points)\n        # shape (batch_size, total_num_queries),\n        # total_num_queries=sum([., h_i * w_i,.])\n        padding_masks = torch.cat(padding_mask_list, dim=1)\n        # shape (total_num_queries, batch_size, c)\n        encoder_inputs = torch.cat(encoder_input_list, dim=1)\n        level_positional_encodings = torch.cat(\n            level_positional_encoding_list, dim=1)\n        device = encoder_inputs.device\n        # shape (num_encoder_levels, 2), from low\n        # resolution to high resolution\n        spatial_shapes = torch.as_tensor(\n            spatial_shapes, dtype=torch.long, device=device)\n        # shape 
(0, h_0*w_0, h_0*w_0+h_1*w_1, ...)\n        level_start_index = torch.cat((spatial_shapes.new_zeros(\n            (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n        reference_points = torch.cat(reference_points_list, dim=0)\n        reference_points = reference_points[None, :, None].repeat(\n            batch_size, 1, self.num_encoder_levels, 1)\n        valid_radios = reference_points.new_ones(\n            (batch_size, self.num_encoder_levels, 2))\n        # shape (num_total_queries, batch_size, c)\n        memory = self.encoder(\n            query=encoder_inputs,\n            query_pos=level_positional_encodings,\n            key_padding_mask=padding_masks,\n            spatial_shapes=spatial_shapes,\n            reference_points=reference_points,\n            level_start_index=level_start_index,\n            valid_ratios=valid_radios)\n        # (batch_size, c, num_total_queries)\n        memory = memory.permute(0, 2, 1)\n\n        # from low resolution to high resolution\n        num_queries_per_level = [e[0] * e[1] for e in spatial_shapes]\n        outs = torch.split(memory, num_queries_per_level, dim=-1)\n        outs = [\n            x.reshape(batch_size, -1, spatial_shapes[i][0],\n                      spatial_shapes[i][1]) for i, x in enumerate(outs)\n        ]\n\n        for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,\n                       -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + F.interpolate(\n                outs[-1],\n                size=cur_feat.shape[-2:],\n                mode='bilinear',\n                align_corners=False)\n            y = self.output_convs[i](y)\n            outs.append(y)\n        multi_scale_features = outs[:self.num_outs]\n\n        mask_feature = self.mask_feature(outs[-1])\n        return mask_feature, multi_scale_features\n"
  },
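The token bookkeeping in the forward pass above can be illustrated in isolation (a sketch with made-up feature sizes): each level contributes `h_i * w_i` tokens, `level_start_index` marks where each level starts in the concatenated sequence, and the encoder output is split back with the same per-level counts.

```python
import torch

spatial_shapes = torch.tensor([[13, 19], [25, 38], [50, 75]])  # low -> high res
level_start_index = torch.cat((spatial_shapes.new_zeros(
    (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
print(level_start_index)  # tensor([   0,  247, 1197])

num_queries_per_level = [int(h * w) for h, w in spatial_shapes]
memory = torch.rand(2, 256, sum(num_queries_per_level))       # (bs, c, tokens)
outs = torch.split(memory, num_queries_per_level, dim=-1)
assert [o.shape[-1] for o in outs] == num_queries_per_level
```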
  {
    "path": "mmdet/models/layers/normed_predictor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\n\nMODELS.register_module('Linear', module=nn.Linear)\n\n\n@MODELS.register_module(name='NormedLinear')\nclass NormedLinear(nn.Linear):\n    \"\"\"Normalized Linear Layer.\n\n    Args:\n        tempeature (float, optional): Tempeature term. Defaults to 20.\n        power (int, optional): Power term. Defaults to 1.0.\n        eps (float, optional): The minimal value of divisor to\n             keep numerical stability. Defaults to 1e-6.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 tempearture: float = 20,\n                 power: int = 1.0,\n                 eps: float = 1e-6,\n                 **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.tempearture = tempearture\n        self.power = power\n        self.eps = eps\n        self.init_weights()\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the weights.\"\"\"\n        nn.init.normal_(self.weight, mean=0, std=0.01)\n        if self.bias is not None:\n            nn.init.constant_(self.bias, 0)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for `NormedLinear`.\"\"\"\n        weight_ = self.weight / (\n            self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x_ * self.tempearture\n\n        return F.linear(x_, weight_, self.bias)\n\n\n@MODELS.register_module(name='NormedConv2d')\nclass NormedConv2d(nn.Conv2d):\n    \"\"\"Normalized Conv2d Layer.\n\n    Args:\n        tempeature (float, optional): Tempeature term. Defaults to 20.\n        power (int, optional): Power term. Defaults to 1.0.\n        eps (float, optional): The minimal value of divisor to\n             keep numerical stability. Defaults to 1e-6.\n        norm_over_kernel (bool, optional): Normalize over kernel.\n             Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 tempearture: float = 20,\n                 power: int = 1.0,\n                 eps: float = 1e-6,\n                 norm_over_kernel: bool = False,\n                 **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.tempearture = tempearture\n        self.power = power\n        self.norm_over_kernel = norm_over_kernel\n        self.eps = eps\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for `NormedConv2d`.\"\"\"\n        if not self.norm_over_kernel:\n            weight_ = self.weight / (\n                self.weight.norm(dim=1, keepdim=True).pow(self.power) +\n                self.eps)\n        else:\n            weight_ = self.weight / (\n                self.weight.view(self.weight.size(0), -1).norm(\n                    dim=1, keepdim=True).pow(self.power)[..., None, None] +\n                self.eps)\n        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x_ * self.tempearture\n\n        if hasattr(self, 'conv2d_forward'):\n            x_ = self.conv2d_forward(x_, weight_)\n        else:\n            if torch.__version__ >= '1.8':\n                x_ = self._conv_forward(x_, weight_, self.bias)\n            else:\n                x_ = self._conv_forward(x_, weight_)\n        return x_\n"
  },
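A sketch of the normalized linear head above: with `power=1.0` both the weight rows and the input features are L2-normalized before the projection, so the logits are essentially cosine similarities scaled by the temperature term (the parameter is spelled `tempearture` in this file).

```python
import torch

from mmdet.models.layers.normed_predictor import NormedLinear

layer = NormedLinear(256, 80, tempearture=20, bias=False)
x = torch.rand(4, 256)
logits = layer(x)
assert logits.shape == (4, 80)
assert logits.abs().max() <= 20.0  # cosine similarity in [-1, 1] scaled by 20
```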
  {
    "path": "mmdet/models/layers/pixel_decoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, ConvModule\nfrom mmengine.model import BaseModule, ModuleList, caffe2_xavier_init\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptMultiConfig\nfrom .positional_encoding import SinePositionalEncoding\nfrom .transformer import DetrTransformerEncoder\n\n\n@MODELS.register_module()\nclass PixelDecoder(BaseModule):\n    \"\"\"Pixel decoder with a structure like fpn.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        feat_channels (int): Number channels for feature.\n        out_channels (int): Number channels for output.\n        norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`ConfigDict` or dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`ConfigDict` or dict): Config for transorformer\n            encoder.Defaults to None.\n        positional_encoding (:obj:`ConfigDict` or dict): Config for\n            transformer encoder position encoding. Defaults to\n            dict(type='SinePositionalEncoding', num_feats=128,\n            normalize=True).\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: Union[List[int], Tuple[int]],\n                 feat_channels: int,\n                 out_channels: int,\n                 norm_cfg: ConfigType = dict(type='GN', num_groups=32),\n                 act_cfg: ConfigType = dict(type='ReLU'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_inputs = len(in_channels)\n        self.lateral_convs = ModuleList()\n        self.output_convs = ModuleList()\n        self.use_bias = norm_cfg is None\n        for i in range(0, self.num_inputs - 1):\n            lateral_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            output_conv = ConvModule(\n                feat_channels,\n                feat_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.lateral_convs.append(lateral_conv)\n            self.output_convs.append(output_conv)\n\n        self.last_feat_conv = ConvModule(\n            in_channels[-1],\n            feat_channels,\n            kernel_size=3,\n            padding=1,\n            stride=1,\n            bias=self.use_bias,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.mask_feature = Conv2d(\n            feat_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_inputs - 2):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, 
bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n        caffe2_xavier_init(self.last_feat_conv, bias=0)\n\n    def forward(self, feats: List[Tensor],\n                batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:\n        \"\"\"\n        Args:\n            feats (list[Tensor]): Feature maps of each level. Each has\n                shape of (batch_size, c, h, w).\n            batch_img_metas (list[dict]): List of image information.\n                Pass in for creating more accurate padding mask. Not\n                used here.\n\n        Returns:\n            tuple[Tensor, Tensor]: a tuple containing the following:\n\n                - mask_feature (Tensor): Shape (batch_size, c, h, w).\n                - memory (Tensor): Output of last stage of backbone.\\\n                        Shape (batch_size, c, h, w).\n        \"\"\"\n        y = self.last_feat_conv(feats[-1])\n        for i in range(self.num_inputs - 2, -1, -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + \\\n                F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')\n            y = self.output_convs[i](y)\n\n        mask_feature = self.mask_feature(y)\n        memory = feats[-1]\n        return mask_feature, memory\n\n\n@MODELS.register_module()\nclass TransformerEncoderPixelDecoder(PixelDecoder):\n    \"\"\"Pixel decoder with transormer encoder inside.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        feat_channels (int): Number channels for feature.\n        out_channels (int): Number channels for output.\n        norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`ConfigDict` or dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`ConfigDict` or dict): Config for transformer encoder.\n            Defaults to None.\n        positional_encoding (:obj:`ConfigDict` or dict): Config for\n            transformer encoder position encoding. Defaults to\n            dict(num_feats=128, normalize=True).\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: Union[List[int], Tuple[int]],\n                 feat_channels: int,\n                 out_channels: int,\n                 norm_cfg: ConfigType = dict(type='GN', num_groups=32),\n                 act_cfg: ConfigType = dict(type='ReLU'),\n                 encoder: ConfigType = None,\n                 positional_encoding: ConfigType = dict(\n                     num_feats=128, normalize=True),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            out_channels=out_channels,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg,\n            init_cfg=init_cfg)\n        self.last_feat_conv = None\n\n        self.encoder = DetrTransformerEncoder(**encoder)\n        self.encoder_embed_dims = self.encoder.embed_dims\n        assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \\\n            'transformer encoder must be equal to feat_channels({})'.format(\n                self.encoder_embed_dims, feat_channels)\n        self.positional_encoding = SinePositionalEncoding(\n            **positional_encoding)\n        self.encoder_in_proj = Conv2d(\n            in_channels[-1], feat_channels, kernel_size=1)\n        self.encoder_out_proj = ConvModule(\n            feat_channels,\n            feat_channels,\n            kernel_size=3,\n            stride=1,\n            padding=1,\n            bias=self.use_bias,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_inputs - 2):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n        caffe2_xavier_init(self.encoder_in_proj, bias=0)\n        caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)\n\n        for p in self.encoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def forward(self, feats: List[Tensor],\n                batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:\n        \"\"\"\n        Args:\n            feats (list[Tensor]): Feature maps of each level. Each has\n                shape of (batch_size, c, h, w).\n            batch_img_metas (list[dict]): List of image information. 
Pass in\n                for creating more accurate padding mask.\n\n        Returns:\n            tuple: a tuple containing the following:\n\n                - mask_feature (Tensor): shape (batch_size, c, h, w).\n                - memory (Tensor): shape (batch_size, c, h, w).\n        \"\"\"\n        feat_last = feats[-1]\n        bs, c, h, w = feat_last.shape\n        input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape']\n        padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),\n                                          dtype=torch.float32)\n        for i in range(bs):\n            img_h, img_w = batch_img_metas[i]['img_shape']\n            padding_mask[i, :img_h, :img_w] = 0\n        padding_mask = F.interpolate(\n            padding_mask.unsqueeze(1),\n            size=feat_last.shape[-2:],\n            mode='nearest').to(torch.bool).squeeze(1)\n\n        pos_embed = self.positional_encoding(padding_mask)\n        feat_last = self.encoder_in_proj(feat_last)\n        # (batch_size, c, h, w) -> (batch_size, num_queries, c)\n        feat_last = feat_last.flatten(2).permute(0, 2, 1)\n        pos_embed = pos_embed.flatten(2).permute(0, 2, 1)\n        # (batch_size, h, w) -> (batch_size, h*w)\n        padding_mask = padding_mask.flatten(1)\n        memory = self.encoder(\n            query=feat_last,\n            query_pos=pos_embed,\n            key_padding_mask=padding_mask)\n        # (batch_size, num_queries, c) -> (batch_size, c, h, w)\n        memory = memory.permute(0, 2, 1).view(bs, self.encoder_embed_dims, h,\n                                              w)\n        y = self.encoder_out_proj(memory)\n        for i in range(self.num_inputs - 2, -1, -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + \\\n                F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')\n            y = self.output_convs[i](y)\n\n        mask_feature = self.mask_feature(y)\n        return mask_feature, memory\n"
  },
  {
    "path": "mmdet/models/layers/positional_encoding.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig, OptMultiConfig\n\n\n@MODELS.register_module()\nclass SinePositionalEncoding(BaseModule):\n    \"\"\"Position encoding with sine and cosine functions.\n\n    See `End-to-End Object Detection with Transformers\n    <https://arxiv.org/pdf/2005.12872>`_ for details.\n\n    Args:\n        num_feats (int): The feature dimension for each position\n            along x-axis or y-axis. Note the final returned dimension\n            for each position is 2 times of this value.\n        temperature (int, optional): The temperature used for scaling\n            the position embedding. Defaults to 10000.\n        normalize (bool, optional): Whether to normalize the position\n            embedding. Defaults to False.\n        scale (float, optional): A scale factor that scales the position\n            embedding. The scale will be used only when `normalize` is True.\n            Defaults to 2*pi.\n        eps (float, optional): A value added to the denominator for\n            numerical stability. Defaults to 1e-6.\n        offset (float): offset add to embed when do the normalization.\n            Defaults to 0.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None\n    \"\"\"\n\n    def __init__(self,\n                 num_feats: int,\n                 temperature: int = 10000,\n                 normalize: bool = False,\n                 scale: float = 2 * math.pi,\n                 eps: float = 1e-6,\n                 offset: float = 0.,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        if normalize:\n            assert isinstance(scale, (float, int)), 'when normalize is set,' \\\n                'scale should be provided and in float or int type, ' \\\n                f'found {type(scale)}'\n        self.num_feats = num_feats\n        self.temperature = temperature\n        self.normalize = normalize\n        self.scale = scale\n        self.eps = eps\n        self.offset = offset\n\n    def forward(self, mask: Tensor) -> Tensor:\n        \"\"\"Forward function for `SinePositionalEncoding`.\n\n        Args:\n            mask (Tensor): ByteTensor mask. Non-zero values representing\n                ignored positions, while zero values means valid positions\n                for this image. 
Shape [bs, h, w].\n\n        Returns:\n            pos (Tensor): Returned position embedding with shape\n                [bs, num_feats*2, h, w].\n        \"\"\"\n        # For convenience of exporting to ONNX, it's required to convert\n        # `masks` from bool to int.\n        mask = mask.to(torch.int)\n        not_mask = 1 - mask  # logical_not\n        y_embed = not_mask.cumsum(1, dtype=torch.float32)\n        x_embed = not_mask.cumsum(2, dtype=torch.float32)\n        if self.normalize:\n            y_embed = (y_embed + self.offset) / \\\n                      (y_embed[:, -1:, :] + self.eps) * self.scale\n            x_embed = (x_embed + self.offset) / \\\n                      (x_embed[:, :, -1:] + self.eps) * self.scale\n        dim_t = torch.arange(\n            self.num_feats, dtype=torch.float32, device=mask.device)\n        dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)\n        pos_x = x_embed[:, :, :, None] / dim_t\n        pos_y = y_embed[:, :, :, None] / dim_t\n        # use `view` instead of `flatten` for dynamically exporting to ONNX\n        B, H, W = mask.size()\n        pos_x = torch.stack(\n            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),\n            dim=4).view(B, H, W, -1)\n        pos_y = torch.stack(\n            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),\n            dim=4).view(B, H, W, -1)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n        return pos\n\n    def __repr__(self) -> str:\n        \"\"\"str: a string that describes the module\"\"\"\n        repr_str = self.__class__.__name__\n        repr_str += f'(num_feats={self.num_feats}, '\n        repr_str += f'temperature={self.temperature}, '\n        repr_str += f'normalize={self.normalize}, '\n        repr_str += f'scale={self.scale}, '\n        repr_str += f'eps={self.eps})'\n        return repr_str\n\n\n@MODELS.register_module()\nclass LearnedPositionalEncoding(BaseModule):\n    \"\"\"Position embedding with learnable embedding weights.\n\n    Args:\n        num_feats (int): The feature dimension for each position\n            along x-axis or y-axis. The final returned dimension for\n            each position is 2 times of this value.\n        row_num_embed (int, optional): The dictionary size of row embeddings.\n            Defaults to 50.\n        col_num_embed (int, optional): The dictionary size of col embeddings.\n            Defaults to 50.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_feats: int,\n        row_num_embed: int = 50,\n        col_num_embed: int = 50,\n        init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.row_embed = nn.Embedding(row_num_embed, num_feats)\n        self.col_embed = nn.Embedding(col_num_embed, num_feats)\n        self.num_feats = num_feats\n        self.row_num_embed = row_num_embed\n        self.col_num_embed = col_num_embed\n\n    def forward(self, mask: Tensor) -> Tensor:\n        \"\"\"Forward function for `LearnedPositionalEncoding`.\n\n        Args:\n            mask (Tensor): ByteTensor mask. Non-zero values representing\n                ignored positions, while zero values means valid positions\n                for this image. 
Shape [bs, h, w].\n\n        Returns:\n            pos (Tensor): Returned position embedding with shape\n                [bs, num_feats*2, h, w].\n        \"\"\"\n        h, w = mask.shape[-2:]\n        x = torch.arange(w, device=mask.device)\n        y = torch.arange(h, device=mask.device)\n        x_embed = self.col_embed(x)\n        y_embed = self.row_embed(y)\n        pos = torch.cat(\n            (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(\n                1, w, 1)),\n            dim=-1).permute(2, 0,\n                            1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)\n        return pos\n\n    def __repr__(self) -> str:\n        \"\"\"str: a string that describes the module\"\"\"\n        repr_str = self.__class__.__name__\n        repr_str += f'(num_feats={self.num_feats}, '\n        repr_str += f'row_num_embed={self.row_num_embed}, '\n        repr_str += f'col_num_embed={self.col_num_embed})'\n        return repr_str\n"
  },
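A short sketch of the sine positional encoding above: it consumes a padding mask (non-zero marks padded positions) and returns an embedding with `2 * num_feats` channels, so `num_feats=128` matches a 256-dimensional transformer.

```python
import torch

from mmdet.models.layers.positional_encoding import SinePositionalEncoding

pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
mask = torch.zeros(2, 32, 32, dtype=torch.bool)  # no padded pixels
pos = pos_enc(mask)
assert pos.shape == (2, 256, 32, 32)
```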
  {
    "path": "mmdet/models/layers/res_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmengine.model import BaseModule, Sequential\nfrom torch import Tensor\nfrom torch import nn as nn\n\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\n\n\nclass ResLayer(Sequential):\n    \"\"\"ResLayer to build ResNet style backbone.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Defaults to 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck. Defaults to False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Defaults to None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Defaults to dict(type='BN')\n        downsample_first (bool): Downsample at the first block or last block.\n            False for Hourglass, True for ResNet. Defaults to True\n    \"\"\"\n\n    def __init__(self,\n                 block: BaseModule,\n                 inplanes: int,\n                 planes: int,\n                 num_blocks: int,\n                 stride: int = 1,\n                 avg_down: bool = False,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 downsample_first: bool = True,\n                 **kwargs) -> None:\n        self.block = block\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = []\n            conv_stride = stride\n            if avg_down:\n                conv_stride = 1\n                downsample.append(\n                    nn.AvgPool2d(\n                        kernel_size=stride,\n                        stride=stride,\n                        ceil_mode=True,\n                        count_include_pad=False))\n            downsample.extend([\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=conv_stride,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1]\n            ])\n            downsample = nn.Sequential(*downsample)\n\n        layers = []\n        if downsample_first:\n            layers.append(\n                block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=stride,\n                    downsample=downsample,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n            inplanes = planes * block.expansion\n            for _ in range(1, num_blocks):\n                layers.append(\n                    block(\n                        inplanes=inplanes,\n                        planes=planes,\n                        stride=1,\n                        conv_cfg=conv_cfg,\n                        norm_cfg=norm_cfg,\n                        **kwargs))\n\n        else:  # downsample_first=False is for HourglassModule\n            for _ in range(num_blocks - 1):\n                layers.append(\n                    block(\n                        inplanes=inplanes,\n                        
planes=inplanes,\n                        stride=1,\n                        conv_cfg=conv_cfg,\n                        norm_cfg=norm_cfg,\n                        **kwargs))\n            layers.append(\n                block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=stride,\n                    downsample=downsample,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n        super().__init__(*layers)\n\n\nclass SimplifiedBasicBlock(BaseModule):\n    \"\"\"Simplified version of original basic residual block. This is used in\n    `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    - Norm layer is now optional\n    - Last ReLU in forward function is removed\n    \"\"\"\n    expansion = 1\n\n    def __init__(self,\n                 inplanes: int,\n                 planes: int,\n                 stride: int = 1,\n                 dilation: int = 1,\n                 downsample: Optional[Sequential] = None,\n                 style: ConfigType = 'pytorch',\n                 with_cp: bool = False,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 dcn: OptConfigType = None,\n                 plugins: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert dcn is None, 'Not implemented yet.'\n        assert plugins is None, 'Not implemented yet.'\n        assert not with_cp, 'Not implemented yet.'\n        self.with_norm = norm_cfg is not None\n        with_bias = True if norm_cfg is None else False\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            3,\n            stride=stride,\n            padding=dilation,\n            dilation=dilation,\n            bias=with_bias)\n        if self.with_norm:\n            self.norm1_name, norm1 = build_norm_layer(\n                norm_cfg, planes, postfix=1)\n            self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            conv_cfg, planes, planes, 3, padding=1, bias=with_bias)\n        if self.with_norm:\n            self.norm2_name, norm2 = build_norm_layer(\n                norm_cfg, planes, postfix=2)\n            self.add_module(self.norm2_name, norm2)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.dilation = dilation\n        self.with_cp = with_cp\n\n    @property\n    def norm1(self) -> Optional[BaseModule]:\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name) if self.with_norm else None\n\n    @property\n    def norm2(self) -> Optional[BaseModule]:\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name) if self.with_norm else None\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for SimplifiedBasicBlock.\"\"\"\n\n        identity = x\n\n        out = self.conv1(x)\n        if self.with_norm:\n            out = self.norm1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        if self.with_norm:\n            out = self.norm2(out)\n\n        if self.downsample is not None:\n            identity = self.downsample(x)\n\n        out += identity\n\n        return out\n"
  },
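A minimal usage sketch of `ResLayer` (illustrative only, not part of the source tree), assuming mmdet's `BasicBlock` and the `ResLayer` re-export are available at their usual import paths:

```python
# Illustrative only: stack two BasicBlocks, downsampling in the first one.
import torch
from mmdet.models.backbones.resnet import BasicBlock
from mmdet.models.layers import ResLayer

res_layer = ResLayer(
    block=BasicBlock,   # BasicBlock.expansion == 1
    inplanes=64,
    planes=64,
    num_blocks=2,
    stride=2,           # first block halves the spatial size
    avg_down=True)      # AvgPool2d + 1x1 conv shortcut instead of a strided 1x1 conv

x = torch.rand(1, 64, 56, 56)
print(res_layer(x).shape)  # torch.Size([1, 64, 28, 28])
```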
  {
    "path": "mmdet/models/layers/se_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom mmengine.utils import digit_version, is_tuple_of\nfrom torch import Tensor\n\nfrom mmdet.utils import MultiConfig, OptConfigType, OptMultiConfig\n\n\nclass SELayer(BaseModule):\n    \"\"\"Squeeze-and-Excitation Module.\n\n    Args:\n        channels (int): The input (and output) channels of the SE layer.\n        ratio (int): Squeeze ratio in SELayer, the intermediate channel will be\n            ``int(channels/ratio)``. Defaults to 16.\n        conv_cfg (None or dict): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        act_cfg (dict or Sequence[dict]): Config dict for activation layer.\n            If act_cfg is a dict, two activation layers will be configurated\n            by this dict. If act_cfg is a sequence of dicts, the first\n            activation layer will be configurated by the first dict and the\n            second activation layer will be configurated by the second dict.\n            Defaults to (dict(type='ReLU'), dict(type='Sigmoid'))\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None\n    \"\"\"\n\n    def __init__(self,\n                 channels: int,\n                 ratio: int = 16,\n                 conv_cfg: OptConfigType = None,\n                 act_cfg: MultiConfig = (dict(type='ReLU'),\n                                         dict(type='Sigmoid')),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        if isinstance(act_cfg, dict):\n            act_cfg = (act_cfg, act_cfg)\n        assert len(act_cfg) == 2\n        assert is_tuple_of(act_cfg, dict)\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = ConvModule(\n            in_channels=channels,\n            out_channels=int(channels / ratio),\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[0])\n        self.conv2 = ConvModule(\n            in_channels=int(channels / ratio),\n            out_channels=channels,\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[1])\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for SELayer.\"\"\"\n        out = self.global_avgpool(x)\n        out = self.conv1(out)\n        out = self.conv2(out)\n        return x * out\n\n\nclass DyReLU(BaseModule):\n    \"\"\"Dynamic ReLU (DyReLU) module.\n\n    See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.\n    Current implementation is specialized for task-aware attention in DyHead.\n    HSigmoid arguments in default act_cfg follow DyHead official code.\n    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py\n\n    Args:\n        channels (int): The input (and output) channels of DyReLU module.\n        ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,\n            the intermediate channel will be ``int(channels/ratio)``.\n            Defaults to 4.\n        conv_cfg (None or dict): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        act_cfg (dict or Sequence[dict]): Config dict for activation layer.\n            If act_cfg is a dict, two activation layers will be configurated\n            by this dict. 
If act_cfg is a sequence of dicts, the first\n            activation layer will be configurated by the first dict and the\n            second activation layer will be configurated by the second dict.\n            Defaults to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,\n            divisor=6.0))\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None\n    \"\"\"\n\n    def __init__(self,\n                 channels: int,\n                 ratio: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 act_cfg: MultiConfig = (dict(type='ReLU'),\n                                         dict(\n                                             type='HSigmoid',\n                                             bias=3.0,\n                                             divisor=6.0)),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        if isinstance(act_cfg, dict):\n            act_cfg = (act_cfg, act_cfg)\n        assert len(act_cfg) == 2\n        assert is_tuple_of(act_cfg, dict)\n        self.channels = channels\n        self.expansion = 4  # for a1, b1, a2, b2\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = ConvModule(\n            in_channels=channels,\n            out_channels=int(channels / ratio),\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[0])\n        self.conv2 = ConvModule(\n            in_channels=int(channels / ratio),\n            out_channels=channels * self.expansion,\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[1])\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        coeffs = self.global_avgpool(x)\n        coeffs = self.conv1(coeffs)\n        coeffs = self.conv2(coeffs) - 0.5  # value range: [-0.5, 0.5]\n        a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)\n        a1 = a1 * 2.0 + 1.0  # [-1.0, 1.0] + 1.0\n        a2 = a2 * 2.0  # [-1.0, 1.0]\n        out = torch.max(x * a1 + b1, x * a2 + b2)\n        return out\n\n\nclass ChannelAttention(BaseModule):\n    \"\"\"Channel attention Module.\n\n    Args:\n        channels (int): The input (and output) channels of the attention layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None\n    \"\"\"\n\n    def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)\n        if digit_version(torch.__version__) < (1, 7, 0):\n            self.act = nn.Hardsigmoid()\n        else:\n            self.act = nn.Hardsigmoid(inplace=True)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for ChannelAttention.\"\"\"\n        with torch.cuda.amp.autocast(enabled=False):\n            out = self.global_avgpool(x)\n        out = self.fc(out)\n        out = self.act(out)\n        return x * out\n"
  },
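A small shape-check sketch (illustrative, not from the repo) for the attention modules above; it assumes `SELayer`, `DyReLU` and `ChannelAttention` are re-exported from `mmdet.models.layers` (import them from `se_layer` directly otherwise):

```python
import torch
from mmdet.models.layers import ChannelAttention, DyReLU, SELayer

x = torch.rand(2, 64, 32, 32)

se = SELayer(channels=64, ratio=16)    # squeeze to 64 // 16 = 4 channels, then re-expand
print(se(x).shape)                     # torch.Size([2, 64, 32, 32])

dyrelu = DyReLU(channels=64, ratio=4)  # predicts (a1, b1, a2, b2) per channel
print(dyrelu(x).shape)                 # torch.Size([2, 64, 32, 32])

ca = ChannelAttention(channels=64)
print(ca(x).shape)                     # torch.Size([2, 64, 32, 32])
```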
  {
    "path": "mmdet/models/layers/transformer/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .conditional_detr_layers import (ConditionalDetrTransformerDecoder,\n                                      ConditionalDetrTransformerDecoderLayer)\nfrom .dab_detr_layers import (DABDetrTransformerDecoder,\n                              DABDetrTransformerDecoderLayer,\n                              DABDetrTransformerEncoder)\nfrom .deformable_detr_layers import (DeformableDetrTransformerDecoder,\n                                     DeformableDetrTransformerDecoderLayer,\n                                     DeformableDetrTransformerEncoder,\n                                     DeformableDetrTransformerEncoderLayer)\nfrom .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,\n                          DetrTransformerEncoder, DetrTransformerEncoderLayer)\nfrom .dino_layers import CdnQueryGenerator, DinoTransformerDecoder\nfrom .mask2former_layers import (Mask2FormerTransformerDecoder,\n                                 Mask2FormerTransformerDecoderLayer,\n                                 Mask2FormerTransformerEncoder)\nfrom .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,\n                    PatchEmbed, PatchMerging, coordinate_to_encoding,\n                    inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)\n\n__all__ = [\n    'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',\n    'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',\n    'DetrTransformerEncoder', 'DetrTransformerDecoder',\n    'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',\n    'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',\n    'DeformableDetrTransformerEncoderLayer',\n    'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',\n    'ConditionalAttention', 'DABDetrTransformerDecoderLayer',\n    'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',\n    'ConditionalDetrTransformerDecoder',\n    'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',\n    'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',\n    'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'\n]\n"
  },
  {
    "path": "mmdet/models/layers/transformer/conditional_detr_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer\nfrom .utils import MLP, ConditionalAttention, coordinate_to_encoding\n\n\nclass ConditionalDetrTransformerDecoder(DetrTransformerDecoder):\n    \"\"\"Decoder of Conditional DETR.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize decoder layers and other layers.\"\"\"\n        self.layers = ModuleList([\n            ConditionalDetrTransformerDecoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n        self.post_norm = build_norm_layer(self.post_norm_cfg,\n                                          self.embed_dims)[1]\n        # conditional detr affline\n        self.query_scale = MLP(self.embed_dims, self.embed_dims,\n                               self.embed_dims, 2)\n        self.ref_point_head = MLP(self.embed_dims, self.embed_dims, 2, 2)\n        # we have substitute 'qpos_proj' with 'qpos_sine_proj' except for\n        # the first decoder layer), so 'qpos_proj' should be deleted\n        # in other layers.\n        for layer_id in range(self.num_layers - 1):\n            self.layers[layer_id + 1].cross_attn.qpos_proj = None\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor = None,\n                query_pos: Tensor = None,\n                key_pos: Tensor = None,\n                key_padding_mask: Tensor = None):\n        \"\"\"Forward function of decoder.\n\n        Args:\n            query (Tensor): The input query with shape\n                (bs, num_queries, dim).\n            key (Tensor): The input key with shape (bs, num_keys, dim) If\n                `None`, the `query` will be used. Defaults to `None`.\n            query_pos (Tensor): The positional encoding for `query`, with the\n                same shape as `query`. If not `None`, it will be added to\n                `query` before forward function. Defaults to `None`.\n            key_pos (Tensor): The positional encoding for `key`, with the\n                same shape as `key`. If not `None`, it will be added to\n                `key` before forward function. If `None`, and `query_pos`\n                has the same shape as `key`, then `query_pos` will be used\n                as `key_pos`. Defaults to `None`.\n            key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys).\n                Defaults to `None`.\n        Returns:\n            List[Tensor]: forwarded results with shape (num_decoder_layers,\n            bs, num_queries, dim) if `return_intermediate` is True, otherwise\n            with shape (1, bs, num_queries, dim). 
References with shape\n            (bs, num_queries, 2).\n        \"\"\"\n        reference_unsigmoid = self.ref_point_head(\n            query_pos)  # [bs, num_queries, 2]\n        reference = reference_unsigmoid.sigmoid()\n        reference_xy = reference[..., :2]\n        intermediate = []\n        for layer_id, layer in enumerate(self.layers):\n            if layer_id == 0:\n                pos_transformation = 1\n            else:\n                pos_transformation = self.query_scale(query)\n            # get sine embedding for the query reference\n            ref_sine_embed = coordinate_to_encoding(coord_tensor=reference_xy)\n            # apply transformation\n            ref_sine_embed = ref_sine_embed * pos_transformation\n            query = layer(\n                query,\n                key=key,\n                query_pos=query_pos,\n                key_pos=key_pos,\n                key_padding_mask=key_padding_mask,\n                ref_sine_embed=ref_sine_embed,\n                is_first=(layer_id == 0))\n            if self.return_intermediate:\n                intermediate.append(self.post_norm(query))\n\n        if self.return_intermediate:\n            return torch.stack(intermediate), reference\n\n        query = self.post_norm(query)\n        return query.unsqueeze(0), reference\n\n\nclass ConditionalDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):\n    \"\"\"Implements decoder layer in Conditional DETR transformer.\"\"\"\n\n    def _init_layers(self):\n        \"\"\"Initialize self-attention, cross-attention, FFN, and\n        normalization.\"\"\"\n        self.self_attn = ConditionalAttention(**self.self_attn_cfg)\n        self.cross_attn = ConditionalAttention(**self.cross_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(3)\n        ]\n        self.norms = ModuleList(norms_list)\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor = None,\n                query_pos: Tensor = None,\n                key_pos: Tensor = None,\n                self_attn_masks: Tensor = None,\n                cross_attn_masks: Tensor = None,\n                key_padding_mask: Tensor = None,\n                ref_sine_embed: Tensor = None,\n                is_first: bool = False):\n        \"\"\"\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim)\n            key (Tensor, optional): The input key, has shape (bs, num_keys,\n                dim). If `None`, the `query` will be used. Defaults to `None`.\n            query_pos (Tensor, optional): The positional encoding for `query`,\n                has the same shape as `query`. If not `None`, it will be\n                added to `query` before forward function. Defaults to `None`.\n            ref_sine_embed (Tensor): The positional encoding for query in\n                cross attention, with the same shape as `x`. Defaults to None.\n            key_pos (Tensor, optional): The positional encoding for `key`, has\n                the same shape as `key`. If not None, it will be added to\n                `key` before forward function. If None, and `query_pos` has\n                the same shape as `key`, then `query_pos` will be used for\n                `key_pos`. 
Defaults to None.\n            self_attn_masks (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), same as in `nn.MultiheadAttention.\n                forward`. Defaults to None.\n            cross_attn_masks (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), same as in `nn.MultiheadAttention.\n                forward`. Defaults to None.\n            key_padding_mask (Tensor, optional): ByteTensor, has shape\n                (bs, num_keys). Defaults to None.\n            is_first (bool): An indicator to tell whether the current layer\n                is the first layer of the decoder. Defaults to False.\n\n        Returns:\n            Tensor: Forwarded results, has shape (bs, num_queries, dim).\n        \"\"\"\n        query = self.self_attn(\n            query=query,\n            key=query,\n            query_pos=query_pos,\n            key_pos=query_pos,\n            attn_mask=self_attn_masks)\n        query = self.norms[0](query)\n        query = self.cross_attn(\n            query=query,\n            key=key,\n            query_pos=query_pos,\n            key_pos=key_pos,\n            attn_mask=cross_attn_masks,\n            key_padding_mask=key_padding_mask,\n            ref_sine_embed=ref_sine_embed,\n            is_first=is_first)\n        query = self.norms[1](query)\n        query = self.ffn(query)\n        query = self.norms[2](query)\n\n        return query\n"
  },
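A short sketch of the reference-point path used by `ConditionalDetrTransformerDecoder.forward` (my own, assuming `coordinate_to_encoding` keeps its default `num_feats=128`): 2-d reference points are sine-encoded and later scaled by `query_scale` before entering cross-attention:

```python
import torch
from mmdet.models.layers.transformer.utils import coordinate_to_encoding

bs, num_queries = 2, 300
reference_xy = torch.rand(bs, num_queries, 2)       # sigmoid-space (cx, cy)
ref_sine_embed = coordinate_to_encoding(coord_tensor=reference_xy)
print(ref_sine_embed.shape)                         # torch.Size([2, 300, 256])
```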
  {
    "path": "mmdet/models/layers/transformer/dab_detr_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN\nfrom mmengine.model import ModuleList\nfrom torch import Tensor\n\nfrom .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,\n                          DetrTransformerEncoder, DetrTransformerEncoderLayer)\nfrom .utils import (MLP, ConditionalAttention, coordinate_to_encoding,\n                    inverse_sigmoid)\n\n\nclass DABDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):\n    \"\"\"Implements decoder layer in DAB-DETR transformer.\"\"\"\n\n    def _init_layers(self):\n        \"\"\"Initialize self-attention, cross-attention, FFN, normalization and\n        others.\"\"\"\n        self.self_attn = ConditionalAttention(**self.self_attn_cfg)\n        self.cross_attn = ConditionalAttention(**self.cross_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(3)\n        ]\n        self.norms = ModuleList(norms_list)\n        self.keep_query_pos = self.cross_attn.keep_query_pos\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor,\n                query_pos: Tensor,\n                key_pos: Tensor,\n                ref_sine_embed: Tensor = None,\n                self_attn_masks: Tensor = None,\n                cross_attn_masks: Tensor = None,\n                key_padding_mask: Tensor = None,\n                is_first: bool = False,\n                **kwargs) -> Tensor:\n        \"\"\"\n        Args:\n            query (Tensor): The input query with shape [bs, num_queries,\n                dim].\n            key (Tensor): The key tensor with shape [bs, num_keys,\n                dim].\n            query_pos (Tensor): The positional encoding for query in self\n                attention, with the same shape as `x`.\n            key_pos (Tensor): The positional encoding for `key`, with the\n                same shape as `key`.\n            ref_sine_embed (Tensor): The positional encoding for query in\n                cross attention, with the same shape as `x`.\n                Defaults to None.\n            self_attn_masks (Tensor): ByteTensor mask with shape [num_queries,\n                num_keys]. Same in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            cross_attn_masks (Tensor): ByteTensor mask with shape [num_queries,\n                num_keys]. 
Same in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].\n                Defaults to None.\n            is_first (bool): A indicator to tell whether the current layer\n                is the first layer of the decoder.\n                Defaults to False.\n\n        Returns:\n            Tensor: forwarded results with shape\n            [bs, num_queries, dim].\n        \"\"\"\n\n        query = self.self_attn(\n            query=query,\n            key=query,\n            query_pos=query_pos,\n            key_pos=query_pos,\n            attn_mask=self_attn_masks,\n            **kwargs)\n        query = self.norms[0](query)\n        query = self.cross_attn(\n            query=query,\n            key=key,\n            query_pos=query_pos,\n            key_pos=key_pos,\n            ref_sine_embed=ref_sine_embed,\n            attn_mask=cross_attn_masks,\n            key_padding_mask=key_padding_mask,\n            is_first=is_first,\n            **kwargs)\n        query = self.norms[1](query)\n        query = self.ffn(query)\n        query = self.norms[2](query)\n\n        return query\n\n\nclass DABDetrTransformerDecoder(DetrTransformerDecoder):\n    \"\"\"Decoder of DAB-DETR.\n\n    Args:\n        query_dim (int): The last dimension of query pos,\n            4 for anchor format, 2 for point format.\n            Defaults to 4.\n        query_scale_type (str): Type of transformation applied\n            to content query. Defaults to `cond_elewise`.\n        with_modulated_hw_attn (bool): Whether to inject h&w info\n            during cross conditional attention. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 query_dim: int = 4,\n                 query_scale_type: str = 'cond_elewise',\n                 with_modulated_hw_attn: bool = True,\n                 **kwargs):\n\n        self.query_dim = query_dim\n        self.query_scale_type = query_scale_type\n        self.with_modulated_hw_attn = with_modulated_hw_attn\n\n        super().__init__(*args, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize decoder layers and other layers.\"\"\"\n        assert self.query_dim in [2, 4], \\\n            f'{\"dab-detr only supports anchor prior or reference point prior\"}'\n        assert self.query_scale_type in [\n            'cond_elewise', 'cond_scalar', 'fix_elewise'\n        ]\n\n        self.layers = ModuleList([\n            DABDetrTransformerDecoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n\n        embed_dims = self.layers[0].embed_dims\n        self.embed_dims = embed_dims\n\n        self.post_norm = build_norm_layer(self.post_norm_cfg, embed_dims)[1]\n        if self.query_scale_type == 'cond_elewise':\n            self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2)\n        elif self.query_scale_type == 'cond_scalar':\n            self.query_scale = MLP(embed_dims, embed_dims, 1, 2)\n        elif self.query_scale_type == 'fix_elewise':\n            self.query_scale = nn.Embedding(self.num_layers, embed_dims)\n        else:\n            raise NotImplementedError('Unknown query_scale_type: {}'.format(\n                self.query_scale_type))\n\n        self.ref_point_head = MLP(self.query_dim // 2 * embed_dims, embed_dims,\n                                  embed_dims, 2)\n\n        if self.with_modulated_hw_attn and self.query_dim == 4:\n            self.ref_anchor_head = 
MLP(embed_dims, embed_dims, 2, 2)\n\n        self.keep_query_pos = self.layers[0].keep_query_pos\n        if not self.keep_query_pos:\n            for layer_id in range(self.num_layers - 1):\n                self.layers[layer_id + 1].cross_attn.qpos_proj = None\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor,\n                query_pos: Tensor,\n                key_pos: Tensor,\n                reg_branches: nn.Module,\n                key_padding_mask: Tensor = None,\n                **kwargs) -> List[Tensor]:\n        \"\"\"Forward function of decoder.\n\n        Args:\n            query (Tensor): The input query with shape (bs, num_queries, dim).\n            key (Tensor): The input key with shape (bs, num_keys, dim).\n            query_pos (Tensor): The positional encoding for `query`, with the\n                same shape as `query`.\n            key_pos (Tensor): The positional encoding for `key`, with the\n                same shape as `key`.\n            reg_branches (nn.Module): The regression branch for dynamically\n                updating references in each layer.\n            key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys).\n                Defaults to `None`.\n\n        Returns:\n            List[Tensor]: forwarded results with shape (num_decoder_layers,\n            bs, num_queries, dim) if `return_intermediate` is True, otherwise\n            with shape (1, bs, num_queries, dim). references with shape\n            (num_decoder_layers, bs, num_queries, 2/4).\n        \"\"\"\n        output = query\n        unsigmoid_references = query_pos\n\n        reference_points = unsigmoid_references.sigmoid()\n        intermediate_reference_points = [reference_points]\n\n        intermediate = []\n        for layer_id, layer in enumerate(self.layers):\n            obj_center = reference_points[..., :self.query_dim]\n            ref_sine_embed = coordinate_to_encoding(\n                coord_tensor=obj_center, num_feats=self.embed_dims // 2)\n            query_pos = self.ref_point_head(\n                ref_sine_embed)  # [bs, nq, 2c] -> [bs, nq, c]\n            # For the first decoder layer, do not apply transformation\n            if self.query_scale_type != 'fix_elewise':\n                if layer_id == 0:\n                    pos_transformation = 1\n                else:\n                    pos_transformation = self.query_scale(output)\n            else:\n                pos_transformation = self.query_scale.weight[layer_id]\n            # apply transformation\n            ref_sine_embed = ref_sine_embed[\n                ..., :self.embed_dims] * pos_transformation\n            # modulated height and weight attention\n            if self.with_modulated_hw_attn:\n                assert obj_center.size(-1) == 4\n                ref_hw = self.ref_anchor_head(output).sigmoid()\n                ref_sine_embed[..., self.embed_dims // 2:] *= \\\n                    (ref_hw[..., 0] / obj_center[..., 2]).unsqueeze(-1)\n                ref_sine_embed[..., : self.embed_dims // 2] *= \\\n                    (ref_hw[..., 1] / obj_center[..., 3]).unsqueeze(-1)\n\n            output = layer(\n                output,\n                key,\n                query_pos=query_pos,\n                ref_sine_embed=ref_sine_embed,\n                key_pos=key_pos,\n                key_padding_mask=key_padding_mask,\n                is_first=(layer_id == 0),\n                **kwargs)\n            # iter update\n            tmp_reg_preds = 
reg_branches(output)\n            tmp_reg_preds[..., :self.query_dim] += inverse_sigmoid(\n                reference_points)\n            new_reference_points = tmp_reg_preds[\n                ..., :self.query_dim].sigmoid()\n            if layer_id != self.num_layers - 1:\n                intermediate_reference_points.append(new_reference_points)\n            reference_points = new_reference_points.detach()\n\n            if self.return_intermediate:\n                intermediate.append(self.post_norm(output))\n\n        output = self.post_norm(output)\n\n        if self.return_intermediate:\n            return [\n                torch.stack(intermediate),\n                torch.stack(intermediate_reference_points),\n            ]\n        else:\n            return [\n                output.unsqueeze(0),\n                torch.stack(intermediate_reference_points)\n            ]\n\n\nclass DABDetrTransformerEncoder(DetrTransformerEncoder):\n    \"\"\"Encoder of DAB-DETR.\"\"\"\n\n    def _init_layers(self):\n        \"\"\"Initialize encoder layers.\"\"\"\n        self.layers = ModuleList([\n            DetrTransformerEncoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        embed_dims = self.layers[0].embed_dims\n        self.embed_dims = embed_dims\n        self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2)\n\n    def forward(self, query: Tensor, query_pos: Tensor,\n                key_padding_mask: Tensor, **kwargs):\n        \"\"\"Forward function of encoder.\n\n        Args:\n            query (Tensor): Input queries of encoder, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional embeddings of the queries, has\n                shape (bs, num_feat_points, dim).\n            key_padding_mask (Tensor): ByteTensor, the key padding mask\n                of the queries, has shape (bs, num_feat_points).\n\n        Returns:\n            Tensor: With shape (num_queries, bs, dim).\n        \"\"\"\n\n        for layer in self.layers:\n            pos_scales = self.query_scale(query)\n            query = layer(\n                query,\n                query_pos=query_pos * pos_scales,\n                key_padding_mask=key_padding_mask,\n                **kwargs)\n\n        return query\n"
  },
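Sketch (illustrative, assuming the `MLP` and `coordinate_to_encoding` helpers behave as they are used above) of how DAB-DETR turns a 4-d anchor into `query_pos` inside the decoder loop:

```python
import torch
from mmdet.models.layers.transformer.utils import MLP, coordinate_to_encoding

embed_dims, bs, num_queries = 256, 2, 300
anchors = torch.rand(bs, num_queries, 4)  # sigmoid-space (cx, cy, w, h)

# sine encoding of the anchor: 4 coords * (embed_dims // 2) feats = 2 * embed_dims
ref_sine_embed = coordinate_to_encoding(anchors, num_feats=embed_dims // 2)
print(ref_sine_embed.shape)               # torch.Size([2, 300, 512])

# project back to embed_dims, as done by self.ref_point_head
ref_point_head = MLP(embed_dims * 2, embed_dims, embed_dims, 2)
query_pos = ref_point_head(ref_sine_embed)
print(query_pos.shape)                    # torch.Size([2, 300, 256])
```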
  {
    "path": "mmdet/models/layers/transformer/deformable_detr_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport torch\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN, MultiheadAttention\nfrom mmcv.ops import MultiScaleDeformableAttention\nfrom mmengine.model import ModuleList\nfrom torch import Tensor, nn\n\nfrom .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,\n                          DetrTransformerEncoder, DetrTransformerEncoderLayer)\nfrom .utils import inverse_sigmoid\n\n\nclass DeformableDetrTransformerEncoder(DetrTransformerEncoder):\n    \"\"\"Transformer encoder of Deformable DETR.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize encoder layers.\"\"\"\n        self.layers = ModuleList([\n            DeformableDetrTransformerEncoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n\n    def forward(self, query: Tensor, query_pos: Tensor,\n                key_padding_mask: Tensor, spatial_shapes: Tensor,\n                level_start_index: Tensor, valid_ratios: Tensor,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function of Transformer encoder.\n\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            query_pos (Tensor): The positional encoding for query, has shape\n                (bs, num_queries, dim).\n            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`\n                input. ByteTensor, has shape (bs, num_queries).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n\n        Returns:\n            Tensor: Output queries of Transformer encoder, which is also\n            called 'encoder output embeddings' or 'memory', has shape\n            (bs, num_queries, dim)\n        \"\"\"\n        reference_points = self.get_encoder_reference_points(\n            spatial_shapes, valid_ratios, device=query.device)\n        for layer in self.layers:\n            query = layer(\n                query=query,\n                query_pos=query_pos,\n                key_padding_mask=key_padding_mask,\n                spatial_shapes=spatial_shapes,\n                level_start_index=level_start_index,\n                valid_ratios=valid_ratios,\n                reference_points=reference_points,\n                **kwargs)\n        return query\n\n    @staticmethod\n    def get_encoder_reference_points(\n            spatial_shapes: Tensor, valid_ratios: Tensor,\n            device: Union[torch.device, str]) -> Tensor:\n        \"\"\"Get the reference points used in encoder.\n\n        Args:\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                
levels, has shape (bs, num_levels, 2).\n            device (obj:`device` or str): The device acquired by the\n                `reference_points`.\n\n        Returns:\n            Tensor: Reference points used in decoder, has shape (bs, length,\n            num_levels, 2).\n        \"\"\"\n\n        reference_points_list = []\n        for lvl, (H, W) in enumerate(spatial_shapes):\n            ref_y, ref_x = torch.meshgrid(\n                torch.linspace(\n                    0.5, H - 0.5, H, dtype=torch.float32, device=device),\n                torch.linspace(\n                    0.5, W - 0.5, W, dtype=torch.float32, device=device))\n            ref_y = ref_y.reshape(-1)[None] / (\n                valid_ratios[:, None, lvl, 1] * H)\n            ref_x = ref_x.reshape(-1)[None] / (\n                valid_ratios[:, None, lvl, 0] * W)\n            ref = torch.stack((ref_x, ref_y), -1)\n            reference_points_list.append(ref)\n        reference_points = torch.cat(reference_points_list, 1)\n        # [bs, sum(hw), num_level, 2]\n        reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n        return reference_points\n\n\nclass DeformableDetrTransformerDecoder(DetrTransformerDecoder):\n    \"\"\"Transformer Decoder of Deformable DETR.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize decoder layers.\"\"\"\n        self.layers = ModuleList([\n            DeformableDetrTransformerDecoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n        if self.post_norm_cfg is not None:\n            raise ValueError('There is not post_norm in '\n                             f'{self._get_name()}')\n\n    def forward(self,\n                query: Tensor,\n                query_pos: Tensor,\n                value: Tensor,\n                key_padding_mask: Tensor,\n                reference_points: Tensor,\n                spatial_shapes: Tensor,\n                level_start_index: Tensor,\n                valid_ratios: Tensor,\n                reg_branches: Optional[nn.Module] = None,\n                **kwargs) -> Tuple[Tensor]:\n        \"\"\"Forward function of Transformer decoder.\n\n        Args:\n            query (Tensor): The input queries, has shape (bs, num_queries,\n                dim).\n            query_pos (Tensor): The input positional query, has shape\n                (bs, num_queries, dim). It will be added to `query` before\n                forward function.\n            value (Tensor): The input values, has shape (bs, num_value, dim).\n            key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn`\n                input. 
ByteTensor, has shape (bs, num_value).\n            reference_points (Tensor): The initial reference, has shape\n                (bs, num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has\n                shape (bs, num_queries, 2) with the last dimension arranged\n                as (cx, cy).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n            reg_branches: (obj:`nn.ModuleList`, optional): Used for refining\n                the regression results. Only would be passed when\n                `with_box_refine` is `True`, otherwise would be `None`.\n\n        Returns:\n            tuple[Tensor]: Outputs of Deformable Transformer Decoder.\n\n            - output (Tensor): Output embeddings of the last decoder, has\n              shape (num_queries, bs, embed_dims) when `return_intermediate`\n              is `False`. Otherwise, Intermediate output embeddings of all\n              decoder layers, has shape (num_decoder_layers, num_queries, bs,\n              embed_dims).\n            - reference_points (Tensor): The reference of the last decoder\n              layer, has shape (bs, num_queries, 4)  when `return_intermediate`\n              is `False`. Otherwise, Intermediate references of all decoder\n              layers, has shape (num_decoder_layers, bs, num_queries, 4). 
The\n              coordinates are arranged as (cx, cy, w, h)\n        \"\"\"\n        output = query\n        intermediate = []\n        intermediate_reference_points = []\n        for layer_id, layer in enumerate(self.layers):\n            if reference_points.shape[-1] == 4:\n                reference_points_input = \\\n                    reference_points[:, :, None] * \\\n                    torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n            else:\n                assert reference_points.shape[-1] == 2\n                reference_points_input = \\\n                    reference_points[:, :, None] * \\\n                    valid_ratios[:, None]\n            output = layer(\n                output,\n                query_pos=query_pos,\n                value=value,\n                key_padding_mask=key_padding_mask,\n                spatial_shapes=spatial_shapes,\n                level_start_index=level_start_index,\n                valid_ratios=valid_ratios,\n                reference_points=reference_points_input,\n                **kwargs)\n\n            if reg_branches is not None:\n                tmp_reg_preds = reg_branches[layer_id](output)\n                if reference_points.shape[-1] == 4:\n                    new_reference_points = tmp_reg_preds + inverse_sigmoid(\n                        reference_points)\n                    new_reference_points = new_reference_points.sigmoid()\n                else:\n                    assert reference_points.shape[-1] == 2\n                    new_reference_points = tmp_reg_preds\n                    new_reference_points[..., :2] = tmp_reg_preds[\n                        ..., :2] + inverse_sigmoid(reference_points)\n                    new_reference_points = new_reference_points.sigmoid()\n                reference_points = new_reference_points.detach()\n\n            if self.return_intermediate:\n                intermediate.append(output)\n                intermediate_reference_points.append(reference_points)\n\n        if self.return_intermediate:\n            return torch.stack(intermediate), torch.stack(\n                intermediate_reference_points)\n\n        return output, reference_points\n\n\nclass DeformableDetrTransformerEncoderLayer(DetrTransformerEncoderLayer):\n    \"\"\"Encoder layer of Deformable DETR.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize self_attn, ffn, and norms.\"\"\"\n        self.self_attn = MultiScaleDeformableAttention(**self.self_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(2)\n        ]\n        self.norms = ModuleList(norms_list)\n\n\nclass DeformableDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):\n    \"\"\"Decoder layer of Deformable DETR.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize self_attn, cross-attn, ffn, and norms.\"\"\"\n        self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n        self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(3)\n        ]\n        self.norms = ModuleList(norms_list)\n"
  },
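Quick sketch (illustrative) of `get_encoder_reference_points`: one normalized (x, y) center per feature location per level, scaled by the valid ratios:

```python
import torch
from mmdet.models.layers.transformer import DeformableDetrTransformerEncoder

spatial_shapes = torch.tensor([[32, 32], [16, 16]])  # (num_levels, 2) as (h, w)
valid_ratios = torch.ones(2, 2, 2)                   # (bs, num_levels, 2), no padding
ref_points = DeformableDetrTransformerEncoder.get_encoder_reference_points(
    spatial_shapes, valid_ratios, device='cpu')
print(ref_points.shape)  # torch.Size([2, 1280, 2, 2]); 1280 = 32*32 + 16*16
```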
  {
    "path": "mmdet/models/layers/transformer/detr_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Union\n\nimport torch\nfrom mmcv.cnn import build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN, MultiheadAttention\nfrom mmengine import ConfigDict\nfrom mmengine.model import BaseModule, ModuleList\nfrom torch import Tensor\n\nfrom mmdet.utils import ConfigType, OptConfigType\n\n\nclass DetrTransformerEncoder(BaseModule):\n    \"\"\"Encoder of DETR.\n\n    Args:\n        num_layers (int): Number of encoder layers.\n        layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder\n            layer. All the layers will share the same config.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_layers: int,\n                 layer_cfg: ConfigType,\n                 init_cfg: OptConfigType = None) -> None:\n\n        super().__init__(init_cfg=init_cfg)\n        self.num_layers = num_layers\n        self.layer_cfg = layer_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize encoder layers.\"\"\"\n        self.layers = ModuleList([\n            DetrTransformerEncoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n\n    def forward(self, query: Tensor, query_pos: Tensor,\n                key_padding_mask: Tensor, **kwargs) -> Tensor:\n        \"\"\"Forward function of encoder.\n\n        Args:\n            query (Tensor): Input queries of encoder, has shape\n                (bs, num_queries, dim).\n            query_pos (Tensor): The positional embeddings of the queries, has\n                shape (bs, num_queries, dim).\n            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`\n                input. ByteTensor, has shape (bs, num_queries).\n\n        Returns:\n            Tensor: Has shape (bs, num_queries, dim) if `batch_first` is\n            `True`, otherwise (num_queries, bs, dim).\n        \"\"\"\n        for layer in self.layers:\n            query = layer(query, query_pos, key_padding_mask, **kwargs)\n        return query\n\n\nclass DetrTransformerDecoder(BaseModule):\n    \"\"\"Decoder of DETR.\n\n    Args:\n        num_layers (int): Number of decoder layers.\n        layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder\n            layer. All the layers will share the same config.\n        post_norm_cfg (:obj:`ConfigDict` or dict, optional): Config of the\n            post normalization layer. Defaults to `LN`.\n        return_intermediate (bool, optional): Whether to return outputs of\n            intermediate layers. Defaults to `True`,\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_layers: int,\n                 layer_cfg: ConfigType,\n                 post_norm_cfg: OptConfigType = dict(type='LN'),\n                 return_intermediate: bool = True,\n                 init_cfg: Union[dict, ConfigDict] = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.layer_cfg = layer_cfg\n        self.num_layers = num_layers\n        self.post_norm_cfg = post_norm_cfg\n        self.return_intermediate = return_intermediate\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize decoder layers.\"\"\"\n        self.layers = ModuleList([\n            DetrTransformerDecoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n        self.post_norm = build_norm_layer(self.post_norm_cfg,\n                                          self.embed_dims)[1]\n\n    def forward(self, query: Tensor, key: Tensor, value: Tensor,\n                query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function of decoder\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            key (Tensor): The input key, has shape (bs, num_keys, dim).\n            value (Tensor): The input value with the same shape as `key`.\n            query_pos (Tensor): The positional encoding for `query`, with the\n                same shape as `query`.\n            key_pos (Tensor): The positional encoding for `key`, with the\n                same shape as `key`.\n            key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn`\n                input. ByteTensor, has shape (bs, num_value).\n\n        Returns:\n            Tensor: The forwarded results will have shape\n            (num_decoder_layers, bs, num_queries, dim) if\n            `return_intermediate` is `True` else (1, bs, num_queries, dim).\n        \"\"\"\n        intermediate = []\n        for layer in self.layers:\n            query = layer(\n                query,\n                key=key,\n                value=value,\n                query_pos=query_pos,\n                key_pos=key_pos,\n                key_padding_mask=key_padding_mask,\n                **kwargs)\n            if self.return_intermediate:\n                intermediate.append(self.post_norm(query))\n        query = self.post_norm(query)\n\n        if self.return_intermediate:\n            return torch.stack(intermediate)\n\n        return query.unsqueeze(0)\n\n\nclass DetrTransformerEncoderLayer(BaseModule):\n    \"\"\"Implements encoder layer in DETR transformer.\n\n    Args:\n        self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self\n            attention.\n        ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config for\n            normalization layers. All the layers will share the same\n            config. Defaults to `LN`.\n        init_cfg (:obj:`ConfigDict` or dict, optional): Config to control\n            the initialization. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 self_attn_cfg: OptConfigType = dict(\n                     embed_dims=256, num_heads=8, dropout=0.0),\n                 ffn_cfg: OptConfigType = dict(\n                     embed_dims=256,\n                     feedforward_channels=1024,\n                     num_fcs=2,\n                     ffn_drop=0.,\n                     act_cfg=dict(type='ReLU', inplace=True)),\n                 norm_cfg: OptConfigType = dict(type='LN'),\n                 init_cfg: OptConfigType = None) -> None:\n\n        super().__init__(init_cfg=init_cfg)\n\n        self.self_attn_cfg = self_attn_cfg\n        if 'batch_first' not in self.self_attn_cfg:\n            self.self_attn_cfg['batch_first'] = True\n        else:\n            assert self.self_attn_cfg['batch_first'] is True, 'First \\\n            dimension of all DETRs in mmdet is `batch`, \\\n            please set `batch_first` flag.'\n\n        self.ffn_cfg = ffn_cfg\n        self.norm_cfg = norm_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize self-attention, FFN, and normalization.\"\"\"\n        self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(2)\n        ]\n        self.norms = ModuleList(norms_list)\n\n    def forward(self, query: Tensor, query_pos: Tensor,\n                key_padding_mask: Tensor, **kwargs) -> Tensor:\n        \"\"\"Forward function of an encoder layer.\n\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            query_pos (Tensor): The positional encoding for query, with\n                the same shape as `query`.\n            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`\n                input. ByteTensor. has shape (bs, num_queries).\n        Returns:\n            Tensor: forwarded results, has shape (bs, num_queries, dim).\n        \"\"\"\n        query = self.self_attn(\n            query=query,\n            key=query,\n            value=query,\n            query_pos=query_pos,\n            key_pos=query_pos,\n            key_padding_mask=key_padding_mask,\n            **kwargs)\n        query = self.norms[0](query)\n        query = self.ffn(query)\n        query = self.norms[1](query)\n\n        return query\n\n\nclass DetrTransformerDecoderLayer(BaseModule):\n    \"\"\"Implements decoder layer in DETR transformer.\n\n    Args:\n        self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self\n            attention.\n        cross_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for cross\n            attention.\n        ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config for\n            normalization layers. All the layers will share the same\n            config. Defaults to `LN`.\n        init_cfg (:obj:`ConfigDict` or dict, optional): Config to control\n            the initialization. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 self_attn_cfg: OptConfigType = dict(\n                     embed_dims=256,\n                     num_heads=8,\n                     dropout=0.0,\n                     batch_first=True),\n                 cross_attn_cfg: OptConfigType = dict(\n                     embed_dims=256,\n                     num_heads=8,\n                     dropout=0.0,\n                     batch_first=True),\n                 ffn_cfg: OptConfigType = dict(\n                     embed_dims=256,\n                     feedforward_channels=1024,\n                     num_fcs=2,\n                     ffn_drop=0.,\n                     act_cfg=dict(type='ReLU', inplace=True),\n                 ),\n                 norm_cfg: OptConfigType = dict(type='LN'),\n                 init_cfg: OptConfigType = None) -> None:\n\n        super().__init__(init_cfg=init_cfg)\n\n        self.self_attn_cfg = self_attn_cfg\n        self.cross_attn_cfg = cross_attn_cfg\n        if 'batch_first' not in self.self_attn_cfg:\n            self.self_attn_cfg['batch_first'] = True\n        else:\n            assert self.self_attn_cfg['batch_first'] is True, 'First \\\n            dimension of all DETRs in mmdet is `batch`, \\\n            please set `batch_first` flag.'\n\n        if 'batch_first' not in self.cross_attn_cfg:\n            self.cross_attn_cfg['batch_first'] = True\n        else:\n            assert self.cross_attn_cfg['batch_first'] is True, 'First \\\n            dimension of all DETRs in mmdet is `batch`, \\\n            please set `batch_first` flag.'\n\n        self.ffn_cfg = ffn_cfg\n        self.norm_cfg = norm_cfg\n        self._init_layers()\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize self-attention, FFN, and normalization.\"\"\"\n        self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n        self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n        self.embed_dims = self.self_attn.embed_dims\n        self.ffn = FFN(**self.ffn_cfg)\n        norms_list = [\n            build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n            for _ in range(3)\n        ]\n        self.norms = ModuleList(norms_list)\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor = None,\n                value: Tensor = None,\n                query_pos: Tensor = None,\n                key_pos: Tensor = None,\n                self_attn_mask: Tensor = None,\n                cross_attn_mask: Tensor = None,\n                key_padding_mask: Tensor = None,\n                **kwargs) -> Tensor:\n        \"\"\"\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            key (Tensor, optional): The input key, has shape (bs, num_keys,\n                dim). If `None`, the `query` will be used. Defaults to `None`.\n            value (Tensor, optional): The input value, has the same shape as\n                `key`, as in `nn.MultiheadAttention.forward`. If `None`, the\n                `key` will be used. Defaults to `None`.\n            query_pos (Tensor, optional): The positional encoding for `query`,\n                has the same shape as `query`. If not `None`, it will be added\n                to `query` before forward function. Defaults to `None`.\n            key_pos (Tensor, optional): The positional encoding for `key`, has\n                the same shape as `key`. If not `None`, it will be added to\n                `key` before forward function. 
If None, and `query_pos` has the\n                same shape as `key`, then `query_pos` will be used for\n                `key_pos`. Defaults to None.\n            self_attn_mask (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), as in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            cross_attn_mask (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), as in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            key_padding_mask (Tensor, optional): The `key_padding_mask` of\n                `self_attn` input. ByteTensor, has shape (bs, num_value).\n                Defaults to None.\n\n        Returns:\n            Tensor: forwarded results, has shape (bs, num_queries, dim).\n        \"\"\"\n\n        query = self.self_attn(\n            query=query,\n            key=query,\n            value=query,\n            query_pos=query_pos,\n            key_pos=query_pos,\n            attn_mask=self_attn_mask,\n            **kwargs)\n        query = self.norms[0](query)\n        query = self.cross_attn(\n            query=query,\n            key=key,\n            value=value,\n            query_pos=query_pos,\n            key_pos=key_pos,\n            attn_mask=cross_attn_mask,\n            key_padding_mask=key_padding_mask,\n            **kwargs)\n        query = self.norms[1](query)\n        query = self.ffn(query)\n        query = self.norms[2](query)\n\n        return query\n"
  },
  {
    "path": "mmdet/models/layers/transformer/dino_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import Tuple, Union\n\nimport torch\nfrom mmengine.model import BaseModule\nfrom torch import Tensor, nn\n\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox_xyxy_to_cxcywh\nfrom mmdet.utils import OptConfigType\nfrom .deformable_detr_layers import DeformableDetrTransformerDecoder\nfrom .utils import MLP, coordinate_to_encoding, inverse_sigmoid\n\n\nclass DinoTransformerDecoder(DeformableDetrTransformerDecoder):\n    \"\"\"Transformer encoder of DINO.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize decoder layers.\"\"\"\n        super()._init_layers()\n        self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims,\n                                  self.embed_dims, 2)\n        self.norm = nn.LayerNorm(self.embed_dims)\n\n    def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor,\n                self_attn_mask: Tensor, reference_points: Tensor,\n                spatial_shapes: Tensor, level_start_index: Tensor,\n                valid_ratios: Tensor, reg_branches: nn.ModuleList,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function of Transformer encoder.\n\n        Args:\n            query (Tensor): The input query, has shape (num_queries, bs, dim).\n            value (Tensor): The input values, has shape (num_value, bs, dim).\n            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`\n                input. ByteTensor, has shape (num_queries, bs).\n            self_attn_mask (Tensor): The attention mask to prevent information\n                leakage from different denoising groups and matching parts, has\n                shape (num_queries_total, num_queries_total). 
It is `None` when\n                `self.training` is `False`.\n            reference_points (Tensor): The initial reference, has shape\n                (bs, num_queries, 4) with the last dimension arranged as\n                (cx, cy, w, h).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n            reg_branches (obj:`nn.ModuleList`): Used for refining the\n                regression results.\n\n        Returns:\n            tuple[Tensor]: Output queries and reference points of the\n            Transformer decoder. If `return_intermediate` is `True`, the\n            stacked intermediate queries have shape (num_decoder_layers, bs,\n            num_queries, dim) and the stacked reference points have shape\n            (num_decoder_layers + 1, bs, num_queries, 4); otherwise the final\n            query has shape (bs, num_queries, dim) and the reference points\n            have shape (bs, num_queries, 4).\n        \"\"\"\n        intermediate = []\n        intermediate_reference_points = [reference_points]\n        for lid, layer in enumerate(self.layers):\n            if reference_points.shape[-1] == 4:\n                reference_points_input = \\\n                    reference_points[:, :, None] * torch.cat(\n                        [valid_ratios, valid_ratios], -1)[:, None]\n            else:\n                assert reference_points.shape[-1] == 2\n                reference_points_input = \\\n                    reference_points[:, :, None] * valid_ratios[:, None]\n\n            query_sine_embed = coordinate_to_encoding(\n                reference_points_input[:, :, 0, :])\n            query_pos = self.ref_point_head(query_sine_embed)\n\n            query = layer(\n                query,\n                query_pos=query_pos,\n                value=value,\n                key_padding_mask=key_padding_mask,\n                self_attn_mask=self_attn_mask,\n                spatial_shapes=spatial_shapes,\n                level_start_index=level_start_index,\n                valid_ratios=valid_ratios,\n                reference_points=reference_points_input,\n                **kwargs)\n\n            if reg_branches is not None:\n                tmp = reg_branches[lid](query)\n                assert reference_points.shape[-1] == 4\n                new_reference_points = tmp + inverse_sigmoid(\n                    reference_points, eps=1e-3)\n                new_reference_points = new_reference_points.sigmoid()\n                reference_points = new_reference_points.detach()\n\n            if self.return_intermediate:\n                intermediate.append(self.norm(query))\n                intermediate_reference_points.append(new_reference_points)\n                # NOTE this is for the \"Look Forward Twice\" module;\n                # in Deformable DETR, reference_points was appended instead.\n\n        if self.return_intermediate:\n            return torch.stack(intermediate), torch.stack(\n                intermediate_reference_points)\n\n        return query, reference_points\n\n\nclass CdnQueryGenerator(BaseModule):\n    \"\"\"Implement query generator of the Contrastive denoising (CDN) proposed in\n    `DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object\n    Detection <https://arxiv.org/abs/2203.03605>`_\n\n    Code is modified from the `official github repo\n    
<https://github.com/IDEA-Research/DINO>`_.\n\n    Args:\n        num_classes (int): Number of object classes.\n        embed_dims (int): The embedding dimensions of the generated queries.\n        num_matching_queries (int): The queries number of the matching part.\n            Used for generating dn_mask.\n        label_noise_scale (float): The scale of label noise, defaults to 0.5.\n        box_noise_scale (float): The scale of box noise, defaults to 1.0.\n        group_cfg (:obj:`ConfigDict` or dict, optional): The config of the\n            denoising queries grouping, includes `dynamic`, `num_dn_queries`,\n            and `num_groups`. Two grouping strategies, 'static dn groups' and\n            'dynamic dn groups', are supported. When `dynamic` is `False`,\n            the `num_groups` should be set, and the number of denoising query\n            groups will always be `num_groups`. When `dynamic` is `True`, the\n            `num_dn_queries` should be set, and the group number will be\n            dynamic to ensure that the denoising queries number will not exceed\n            `num_dn_queries` to prevent large fluctuations of memory. Defaults\n            to `None`.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 embed_dims: int,\n                 num_matching_queries: int,\n                 label_noise_scale: float = 0.5,\n                 box_noise_scale: float = 1.0,\n                 group_cfg: OptConfigType = None) -> None:\n        super().__init__()\n        self.num_classes = num_classes\n        self.embed_dims = embed_dims\n        self.num_matching_queries = num_matching_queries\n        self.label_noise_scale = label_noise_scale\n        self.box_noise_scale = box_noise_scale\n\n        # prepare grouping strategy\n        group_cfg = {} if group_cfg is None else group_cfg\n        self.dynamic_dn_groups = group_cfg.get('dynamic', True)\n        if self.dynamic_dn_groups:\n            if 'num_dn_queries' not in group_cfg:\n                warnings.warn(\"'num_dn_queries' should be set when using \"\n                              'dynamic dn groups, use 100 as default.')\n            self.num_dn_queries = group_cfg.get('num_dn_queries', 100)\n            assert isinstance(self.num_dn_queries, int), \\\n                f'Expected the num_dn_queries to have type int, but got ' \\\n                f'{self.num_dn_queries}({type(self.num_dn_queries)}). '\n        else:\n            assert 'num_groups' in group_cfg, \\\n                'num_groups should be set when using static dn groups'\n            self.num_groups = group_cfg['num_groups']\n            assert isinstance(self.num_groups, int), \\\n                f'Expected the num_groups to have type int, but got ' \\\n                f'{self.num_groups}({type(self.num_groups)}). '\n\n        # NOTE The original repo of DINO set the num_embeddings 92 for coco,\n        # 91 (0~90) of which represents target classes and the 92 (91)\n        # indicates `Unknown` class. 
However, the embedding of `unknown` class\n        # is not used in the original DINO.\n        # TODO: num_classes + 1 or num_classes ?\n        self.label_embedding = nn.Embedding(self.num_classes, self.embed_dims)\n\n    def __call__(self, batch_data_samples: SampleList) -> tuple:\n        \"\"\"Generate contrastive denoising (cdn) queries with ground truth.\n\n        Descriptions of the Number Values in code and comments:\n            - num_target_total: the total target number of the input batch\n              samples.\n            - max_num_target: the max target number of the input batch samples.\n            - num_noisy_targets: the total targets number after adding noise,\n              i.e., num_target_total * num_groups * 2.\n            - num_denoising_queries: the length of the output batched queries,\n              i.e., max_num_target * num_groups * 2.\n\n        NOTE The format of input bboxes in batch_data_samples is unnormalized\n        (x, y, x, y), and the output bbox queries are embedded by normalized\n        (cx, cy, w, h) format bboxes going through inverse_sigmoid.\n\n        Args:\n            batch_data_samples (list[:obj:`DetDataSample`]): List of the batch\n                data samples, each includes `gt_instance` which has attributes\n                `bboxes` and `labels`. The `bboxes` has unnormalized coordinate\n                format (x, y, x, y).\n\n        Returns:\n            tuple: The outputs of the dn query generator.\n\n            - dn_label_query (Tensor): The output content queries for denoising\n              part, has shape (bs, num_denoising_queries, dim), where\n              `num_denoising_queries = max_num_target * num_groups * 2`.\n            - dn_bbox_query (Tensor): The output reference bboxes as positions\n              of queries for denoising part, which are embedded by normalized\n              (cx, cy, w, h) format bboxes going through inverse_sigmoid, has\n              shape (bs, num_denoising_queries, 4) with the last dimension\n              arranged as (cx, cy, w, h).\n            - attn_mask (Tensor): The attention mask to prevent information\n              leakage from different denoising groups and matching parts,\n              will be used as `self_attn_mask` of the `decoder`, has shape\n              (num_queries_total, num_queries_total), where `num_queries_total`\n              is the sum of `num_denoising_queries` and `num_matching_queries`.\n            - dn_meta (Dict[str, int]): The dictionary saves information about\n              group collation, including 'num_denoising_queries' and\n              'num_denoising_groups'. 
It will be used to split the outputs of\n              denoising and matching parts and for loss calculation.\n        \"\"\"\n        # normalize bbox and collate ground truth (gt)\n        gt_labels_list = []\n        gt_bboxes_list = []\n        for sample in batch_data_samples:\n            img_h, img_w = sample.img_shape\n            bboxes = sample.gt_instances.bboxes\n            factor = bboxes.new_tensor([img_w, img_h, img_w,\n                                        img_h]).unsqueeze(0)\n            bboxes_normalized = bboxes / factor\n            gt_bboxes_list.append(bboxes_normalized)\n            gt_labels_list.append(sample.gt_instances.labels)\n        gt_labels = torch.cat(gt_labels_list)  # (num_target_total, )\n        gt_bboxes = torch.cat(gt_bboxes_list)  # (num_target_total, 4)\n\n        num_target_list = [len(bboxes) for bboxes in gt_bboxes_list]\n        max_num_target = max(num_target_list)\n        num_groups = self.get_num_groups(max_num_target)\n\n        dn_label_query = self.generate_dn_label_query(gt_labels, num_groups)\n        dn_bbox_query = self.generate_dn_bbox_query(gt_bboxes, num_groups)\n\n        # The `batch_idx` saves the batch index of the corresponding sample\n        # for each target, has shape (num_target_total).\n        batch_idx = torch.cat([\n            torch.full_like(t.long(), i) for i, t in enumerate(gt_labels_list)\n        ])\n        dn_label_query, dn_bbox_query = self.collate_dn_queries(\n            dn_label_query, dn_bbox_query, batch_idx, len(batch_data_samples),\n            num_groups)\n\n        attn_mask = self.generate_dn_mask(\n            max_num_target, num_groups, device=dn_label_query.device)\n\n        dn_meta = dict(\n            num_denoising_queries=int(max_num_target * 2 * num_groups),\n            num_denoising_groups=num_groups)\n\n        return dn_label_query, dn_bbox_query, attn_mask, dn_meta\n\n    def get_num_groups(self, max_num_target: int = None) -> int:\n        \"\"\"Calculate the number of denoising query groups.\n\n        Two grouping strategies, 'static dn groups' and 'dynamic dn groups',\n        are supported. When `self.dynamic_dn_groups` is `False`, the number\n        of denoising query groups will always be `self.num_groups`. When\n        `self.dynamic_dn_groups` is `True`, the group number will be dynamic,\n        ensuring that the number of denoising queries will not exceed\n        `self.num_dn_queries` to prevent large fluctuations of memory.\n\n        NOTE The `num_group` is shared across different samples in a batch. When\n        the target numbers of the samples vary, the denoising queries of the\n        samples containing fewer targets are padded to the max length.\n\n        Args:\n            max_num_target (int, optional): The max target number of the batch\n                samples. It will only be used when `self.dynamic_dn_groups` is\n                `True`. 
Defaults to `None`.\n\n        Returns:\n            int: The denoising group number of the current batch.\n        \"\"\"\n        if self.dynamic_dn_groups:\n            assert max_num_target is not None, \\\n                'group_queries should be provided when using ' \\\n                'dynamic dn groups'\n            if max_num_target == 0:\n                num_groups = 1\n            else:\n                num_groups = self.num_dn_queries // max_num_target\n        else:\n            num_groups = self.num_groups\n        if num_groups < 1:\n            num_groups = 1\n        return int(num_groups)\n\n    def generate_dn_label_query(self, gt_labels: Tensor,\n                                num_groups: int) -> Tensor:\n        \"\"\"Generate noisy labels and their query embeddings.\n\n        The strategy for generating noisy labels is: Randomly choose labels of\n        `self.label_noise_scale * 0.5` proportion and override each of them\n        with a random object category label.\n\n        NOTE Not add noise to all labels. Besides, the `self.label_noise_scale\n        * 0.5` arg is the ratio of the chosen positions, which is higher than\n        the actual proportion of noisy labels, because the labels to override\n        may be correct. And the gap becomes larger as the number of target\n        categories decreases. The users should notice this and modify the scale\n        arg or the corresponding logic according to specific dataset.\n\n        Args:\n            gt_labels (Tensor): The concatenated gt labels of all samples\n                in the batch, has shape (num_target_total, ) where\n                `num_target_total = sum(num_target_list)`.\n            num_groups (int): The number of denoising query groups.\n\n        Returns:\n            Tensor: The query embeddings of noisy labels, has shape\n            (num_noisy_targets, embed_dims), where `num_noisy_targets =\n            num_target_total * num_groups * 2`.\n        \"\"\"\n        assert self.label_noise_scale > 0\n        gt_labels_expand = gt_labels.repeat(2 * num_groups,\n                                            1).view(-1)  # Note `* 2`  # noqa\n        p = torch.rand_like(gt_labels_expand.float())\n        chosen_indice = torch.nonzero(p < (self.label_noise_scale * 0.5)).view(\n            -1)  # Note `* 0.5`\n        new_labels = torch.randint_like(chosen_indice, 0, self.num_classes)\n        noisy_labels_expand = gt_labels_expand.scatter(0, chosen_indice,\n                                                       new_labels)\n        dn_label_query = self.label_embedding(noisy_labels_expand)\n        return dn_label_query\n\n    def generate_dn_bbox_query(self, gt_bboxes: Tensor,\n                               num_groups: int) -> Tensor:\n        \"\"\"Generate noisy bboxes and their query embeddings.\n\n        The strategy for generating noisy bboxes is as follow:\n\n        .. 
code:: text\n\n            +--------------------+\n            |      negative      |\n            |    +----------+    |\n            |    | positive |    |\n            |    |    +-----|----+------------+\n            |    |    |     |    |            |\n            |    +----+-----+    |            |\n            |         |          |            |\n            +---------+----------+            |\n                      |                       |\n                      |        gt bbox        |\n                      |                       |\n                      |             +---------+----------+\n                      |             |         |          |\n                      |             |    +----+-----+    |\n                      |             |    |    |     |    |\n                      +-------------|--- +----+     |    |\n                                    |    | positive |    |\n                                    |    +----------+    |\n                                    |      negative      |\n                                    +--------------------+\n\n         The random noise is added to the top-left and down-right point\n         positions, hence, normalized (x, y, x, y) format of bboxes are\n         required. The noisy bboxes of positive queries have the points\n         both within the inner square, while those of negative queries\n         have the points both between the inner and outer squares.\n\n        Besides, the length of outer square is twice as long as that of\n        the inner square, i.e., self.box_noise_scale * w_or_h / 2.\n        NOTE The noise is added to all the bboxes. Moreover, there is still\n        unconsidered case when one point is within the positive square and\n        the others is between the inner and outer squares.\n\n        Args:\n            gt_bboxes (Tensor): The concatenated gt bboxes of all samples\n                in the batch, has shape (num_target_total, 4) with the last\n                dimension arranged as (cx, cy, w, h) where\n                `num_target_total = sum(num_target_list)`.\n            num_groups (int): The number of denoising query groups.\n\n        Returns:\n            Tensor: The output noisy bboxes, which are embedded by normalized\n            (cx, cy, w, h) format bboxes going through inverse_sigmoid, has\n            shape (num_noisy_targets, 4) with the last dimension arranged as\n            (cx, cy, w, h), where\n            `num_noisy_targets = num_target_total * num_groups * 2`.\n        \"\"\"\n        assert self.box_noise_scale > 0\n        device = gt_bboxes.device\n\n        # expand gt_bboxes as groups\n        gt_bboxes_expand = gt_bboxes.repeat(2 * num_groups, 1)  # xyxy\n\n        # obtain index of negative queries in gt_bboxes_expand\n        positive_idx = torch.arange(\n            len(gt_bboxes), dtype=torch.long, device=device)\n        positive_idx = positive_idx.unsqueeze(0).repeat(num_groups, 1)\n        positive_idx += 2 * len(gt_bboxes) * torch.arange(\n            num_groups, dtype=torch.long, device=device)[:, None]\n        positive_idx = positive_idx.flatten()\n        negative_idx = positive_idx + len(gt_bboxes)\n\n        # determine the sign of each element in the random part of the added\n        # noise to be positive or negative randomly.\n        rand_sign = torch.randint_like(\n            gt_bboxes_expand, low=0, high=2,\n            dtype=torch.float32) * 2.0 - 1.0  # [low, high), 1 or -1, randomly\n\n        # calculate the random part of the added noise\n 
       rand_part = torch.rand_like(gt_bboxes_expand)  # [0, 1)\n        rand_part[negative_idx] += 1.0  # pos: [0, 1); neg: [1, 2)\n        rand_part *= rand_sign  # pos: (-1, 1); neg: (-2, -1] U [1, 2)\n\n        # add noise to the bboxes\n        bboxes_whwh = bbox_xyxy_to_cxcywh(gt_bboxes_expand)[:, 2:].repeat(1, 2)\n        noisy_bboxes_expand = gt_bboxes_expand + torch.mul(\n            rand_part, bboxes_whwh) * self.box_noise_scale / 2  # xyxy\n        noisy_bboxes_expand = noisy_bboxes_expand.clamp(min=0.0, max=1.0)\n        noisy_bboxes_expand = bbox_xyxy_to_cxcywh(noisy_bboxes_expand)\n\n        dn_bbox_query = inverse_sigmoid(noisy_bboxes_expand, eps=1e-3)\n        return dn_bbox_query\n\n    def collate_dn_queries(self, input_label_query: Tensor,\n                           input_bbox_query: Tensor, batch_idx: Tensor,\n                           batch_size: int, num_groups: int) -> Tuple[Tensor]:\n        \"\"\"Collate generated queries to obtain batched dn queries.\n\n        The strategy for query collation is as follow:\n\n        .. code:: text\n\n                    input_queries (num_target_total, query_dim)\n            P_A1 P_B1 P_B2 N_A1 N_B1 N_B2 P'A1 P'B1 P'B2 N'A1 N'B1 N'B2\n              |________ group1 ________|    |________ group2 ________|\n                                         |\n                                         V\n                      P_A1 Pad0 N_A1 Pad0 P'A1 Pad0 N'A1 Pad0\n                      P_B1 P_B2 N_B1 N_B2 P'B1 P'B2 N'B1 N'B2\n                       |____ group1 ____| |____ group2 ____|\n             batched_queries (batch_size, max_num_target, query_dim)\n\n            where query_dim is 4 for bbox and self.embed_dims for label.\n            Notation: _-group 1; '-group 2;\n                      A-Sample1(has 1 target); B-sample2(has 2 targets)\n\n        Args:\n            input_label_query (Tensor): The generated label queries of all\n                targets, has shape (num_target_total, embed_dims) where\n                `num_target_total = sum(num_target_list)`.\n            input_bbox_query (Tensor): The generated bbox queries of all\n                targets, has shape (num_target_total, 4) with the last\n                dimension arranged as (cx, cy, w, h).\n            batch_idx (Tensor): The batch index of the corresponding sample\n                for each target, has shape (num_target_total).\n            batch_size (int): The size of the input batch.\n            num_groups (int): The number of denoising query groups.\n\n        Returns:\n            tuple[Tensor]: Output batched label and bbox queries.\n            - batched_label_query (Tensor): The output batched label queries,\n              has shape (batch_size, max_num_target, embed_dims).\n            - batched_bbox_query (Tensor): The output batched bbox queries,\n              has shape (batch_size, max_num_target, 4) with the last dimension\n              arranged as (cx, cy, w, h).\n        \"\"\"\n        device = input_label_query.device\n        num_target_list = [\n            torch.sum(batch_idx == idx) for idx in range(batch_size)\n        ]\n        max_num_target = max(num_target_list)\n        num_denoising_queries = int(max_num_target * 2 * num_groups)\n\n        map_query_index = torch.cat([\n            torch.arange(num_target, device=device)\n            for num_target in num_target_list\n        ])\n        map_query_index = torch.cat([\n            map_query_index + max_num_target * i for i in range(2 * num_groups)\n        ]).long()\n        
batch_idx_expand = batch_idx.repeat(2 * num_groups, 1).view(-1)\n        mapper = (batch_idx_expand, map_query_index)\n\n        batched_label_query = torch.zeros(\n            batch_size, num_denoising_queries, self.embed_dims, device=device)\n        batched_bbox_query = torch.zeros(\n            batch_size, num_denoising_queries, 4, device=device)\n\n        batched_label_query[mapper] = input_label_query\n        batched_bbox_query[mapper] = input_bbox_query\n        return batched_label_query, batched_bbox_query\n\n    def generate_dn_mask(self, max_num_target: int, num_groups: int,\n                         device: Union[torch.device, str]) -> Tensor:\n        \"\"\"Generate attention mask to prevent information leakage from\n        different denoising groups and matching parts.\n\n        .. code:: text\n\n                        0 0 0 0 1 1 1 1 0 0 0 0 0\n                        0 0 0 0 1 1 1 1 0 0 0 0 0\n                        0 0 0 0 1 1 1 1 0 0 0 0 0\n                        0 0 0 0 1 1 1 1 0 0 0 0 0\n                        1 1 1 1 0 0 0 0 0 0 0 0 0\n                        1 1 1 1 0 0 0 0 0 0 0 0 0\n                        1 1 1 1 0 0 0 0 0 0 0 0 0\n                        1 1 1 1 0 0 0 0 0 0 0 0 0\n                        1 1 1 1 1 1 1 1 0 0 0 0 0\n                        1 1 1 1 1 1 1 1 0 0 0 0 0\n                        1 1 1 1 1 1 1 1 0 0 0 0 0\n                        1 1 1 1 1 1 1 1 0 0 0 0 0\n                        1 1 1 1 1 1 1 1 0 0 0 0 0\n         max_num_target |_|           |_________| num_matching_queries\n                        |_____________| num_denoising_queries\n\n               1 -> True  (Masked), means 'can not see'.\n               0 -> False (UnMasked), means 'can see'.\n\n        Args:\n            max_num_target (int): The max target number of the input batch\n                samples.\n            num_groups (int): The number of denoising query groups.\n            device (obj:`device` or str): The device of generated mask.\n\n        Returns:\n            Tensor: The attention mask to prevent information leakage from\n            different denoising groups and matching parts, will be used as\n            `self_attn_mask` of the `decoder`, has shape (num_queries_total,\n            num_queries_total), where `num_queries_total` is the sum of\n            `num_denoising_queries` and `num_matching_queries`.\n        \"\"\"\n        num_denoising_queries = int(max_num_target * 2 * num_groups)\n        num_queries_total = num_denoising_queries + self.num_matching_queries\n        attn_mask = torch.zeros(\n            num_queries_total,\n            num_queries_total,\n            device=device,\n            dtype=torch.bool)\n        # Make the matching part cannot see the denoising groups\n        attn_mask[num_denoising_queries:, :num_denoising_queries] = True\n        # Make the denoising groups cannot see each other\n        for i in range(num_groups):\n            # Mask rows of one group per step.\n            row_scope = slice(max_num_target * 2 * i,\n                              max_num_target * 2 * (i + 1))\n            left_scope = slice(max_num_target * 2 * i)\n            right_scope = slice(max_num_target * 2 * (i + 1),\n                                num_denoising_queries)\n            attn_mask[row_scope, right_scope] = True\n            attn_mask[row_scope, left_scope] = True\n        return attn_mask\n"
  },
  {
    "path": "mmdet/models/layers/transformer/mask2former_layers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import build_norm_layer\nfrom mmengine.model import ModuleList\nfrom torch import Tensor\n\nfrom .deformable_detr_layers import DeformableDetrTransformerEncoder\nfrom .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer\n\n\nclass Mask2FormerTransformerEncoder(DeformableDetrTransformerEncoder):\n    \"\"\"Encoder in PixelDecoder of Mask2Former.\"\"\"\n\n    def forward(self, query: Tensor, query_pos: Tensor,\n                key_padding_mask: Tensor, spatial_shapes: Tensor,\n                level_start_index: Tensor, valid_ratios: Tensor,\n                reference_points: Tensor, **kwargs) -> Tensor:\n        \"\"\"Forward function of Transformer encoder.\n\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            query_pos (Tensor): The positional encoding for query, has shape\n                (bs, num_queries, dim). If not None, it will be added to the\n                `query` before forward function. Defaults to None.\n            key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`\n                input. ByteTensor, has shape (bs, num_queries).\n            spatial_shapes (Tensor): Spatial shapes of features in all levels,\n                has shape (num_levels, 2), last dimension represents (h, w).\n            level_start_index (Tensor): The start index of each level.\n                A tensor has shape (num_levels, ) and can be represented\n                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n            valid_ratios (Tensor): The ratios of the valid width and the valid\n                height relative to the width and the height of features in all\n                levels, has shape (bs, num_levels, 2).\n            reference_points (Tensor): The initial reference, has shape\n                (bs, num_queries, 2) with the last dimension arranged\n                as (cx, cy).\n\n        Returns:\n            Tensor: Output queries of Transformer encoder, which is also\n            called 'encoder output embeddings' or 'memory', has shape\n            (bs, num_queries, dim)\n        \"\"\"\n        for layer in self.layers:\n            query = layer(\n                query=query,\n                query_pos=query_pos,\n                key_padding_mask=key_padding_mask,\n                spatial_shapes=spatial_shapes,\n                level_start_index=level_start_index,\n                valid_ratios=valid_ratios,\n                reference_points=reference_points,\n                **kwargs)\n        return query\n\n\nclass Mask2FormerTransformerDecoder(DetrTransformerDecoder):\n    \"\"\"Decoder of Mask2Former.\"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize decoder layers.\"\"\"\n        self.layers = ModuleList([\n            Mask2FormerTransformerDecoderLayer(**self.layer_cfg)\n            for _ in range(self.num_layers)\n        ])\n        self.embed_dims = self.layers[0].embed_dims\n        self.post_norm = build_norm_layer(self.post_norm_cfg,\n                                          self.embed_dims)[1]\n\n\nclass Mask2FormerTransformerDecoderLayer(DetrTransformerDecoderLayer):\n    \"\"\"Implements decoder layer in Mask2Former transformer.\"\"\"\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor = None,\n                value: Tensor = None,\n                query_pos: Tensor = None,\n                key_pos: Tensor = None,\n                self_attn_mask: 
Tensor = None,\n                cross_attn_mask: Tensor = None,\n                key_padding_mask: Tensor = None,\n                **kwargs) -> Tensor:\n        \"\"\"\n        Args:\n            query (Tensor): The input query, has shape (bs, num_queries, dim).\n            key (Tensor, optional): The input key, has shape (bs, num_keys,\n                dim). If `None`, the `query` will be used. Defaults to `None`.\n            value (Tensor, optional): The input value, has the same shape as\n                `key`, as in `nn.MultiheadAttention.forward`. If `None`, the\n                `key` will be used. Defaults to `None`.\n            query_pos (Tensor, optional): The positional encoding for `query`,\n                has the same shape as `query`. If not `None`, it will be added\n                to `query` before forward function. Defaults to `None`.\n            key_pos (Tensor, optional): The positional encoding for `key`, has\n                the same shape as `key`. If not `None`, it will be added to\n                `key` before forward function. If None, and `query_pos` has the\n                same shape as `key`, then `query_pos` will be used for\n                `key_pos`. Defaults to None.\n            self_attn_mask (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), as in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            cross_attn_mask (Tensor, optional): ByteTensor mask, has shape\n                (num_queries, num_keys), as in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            key_padding_mask (Tensor, optional): The `key_padding_mask` of\n                `self_attn` input. ByteTensor, has shape (bs, num_value).\n                Defaults to None.\n\n        Returns:\n            Tensor: forwarded results, has shape (bs, num_queries, dim).\n        \"\"\"\n\n        query = self.cross_attn(\n            query=query,\n            key=key,\n            value=value,\n            query_pos=query_pos,\n            key_pos=key_pos,\n            attn_mask=cross_attn_mask,\n            key_padding_mask=key_padding_mask,\n            **kwargs)\n        query = self.norms[0](query)\n        query = self.self_attn(\n            query=query,\n            key=query,\n            value=query,\n            query_pos=query_pos,\n            key_pos=query_pos,\n            attn_mask=self_attn_mask,\n            **kwargs)\n        query = self.norms[1](query)\n        query = self.ffn(query)\n        query = self.norms[2](query)\n\n        return query\n"
  },
  {
    "path": "mmdet/models/layers/transformer/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\nfrom typing import Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import (Linear, build_activation_layer, build_conv_layer,\n                      build_norm_layer)\nfrom mmcv.cnn.bricks.drop import Dropout\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.utils import to_2tuple\nfrom torch import Tensor, nn\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\n\n\ndef nlc_to_nchw(x: Tensor, hw_shape: Sequence[int]) -> Tensor:\n    \"\"\"Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.\n\n    Args:\n        x (Tensor): The input tensor of shape [N, L, C] before conversion.\n        hw_shape (Sequence[int]): The height and width of output feature map.\n\n    Returns:\n        Tensor: The output tensor of shape [N, C, H, W] after conversion.\n    \"\"\"\n    H, W = hw_shape\n    assert len(x.shape) == 3\n    B, L, C = x.shape\n    assert L == H * W, 'The seq_len does not match H, W'\n    return x.transpose(1, 2).reshape(B, C, H, W).contiguous()\n\n\ndef nchw_to_nlc(x):\n    \"\"\"Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.\n\n    Args:\n        x (Tensor): The input tensor of shape [N, C, H, W] before conversion.\n\n    Returns:\n        Tensor: The output tensor of shape [N, L, C] after conversion.\n    \"\"\"\n    assert len(x.shape) == 4\n    return x.flatten(2).transpose(1, 2).contiguous()\n\n\ndef coordinate_to_encoding(coord_tensor: Tensor,\n                           num_feats: int = 128,\n                           temperature: int = 10000,\n                           scale: float = 2 * math.pi):\n    \"\"\"Convert coordinate tensor to positional encoding.\n\n    Args:\n        coord_tensor (Tensor): Coordinate tensor to be converted to\n            positional encoding. With the last dimension as 2 or 4.\n        num_feats (int, optional): The feature dimension for each position\n            along x-axis or y-axis. Note the final returned dimension\n            for each position is 2 times of this value. Defaults to 128.\n        temperature (int, optional): The temperature used for scaling\n            the position embedding. Defaults to 10000.\n        scale (float, optional): A scale factor that scales the position\n            embedding. 
The scale will be used only when `normalize` is True.\n            Defaults to 2*pi.\n    Returns:\n        Tensor: Returned encoded positional tensor.\n    \"\"\"\n    dim_t = torch.arange(\n        num_feats, dtype=torch.float32, device=coord_tensor.device)\n    dim_t = temperature**(2 * (dim_t // 2) / num_feats)\n    x_embed = coord_tensor[..., 0] * scale\n    y_embed = coord_tensor[..., 1] * scale\n    pos_x = x_embed[..., None] / dim_t\n    pos_y = y_embed[..., None] / dim_t\n    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()),\n                        dim=-1).flatten(2)\n    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()),\n                        dim=-1).flatten(2)\n    if coord_tensor.size(-1) == 2:\n        pos = torch.cat((pos_y, pos_x), dim=-1)\n    elif coord_tensor.size(-1) == 4:\n        w_embed = coord_tensor[..., 2] * scale\n        pos_w = w_embed[..., None] / dim_t\n        pos_w = torch.stack((pos_w[..., 0::2].sin(), pos_w[..., 1::2].cos()),\n                            dim=-1).flatten(2)\n\n        h_embed = coord_tensor[..., 3] * scale\n        pos_h = h_embed[..., None] / dim_t\n        pos_h = torch.stack((pos_h[..., 0::2].sin(), pos_h[..., 1::2].cos()),\n                            dim=-1).flatten(2)\n\n        pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=-1)\n    else:\n        raise ValueError('Unknown pos_tensor shape(-1):{}'.format(\n            coord_tensor.size(-1)))\n    return pos\n\n\ndef inverse_sigmoid(x: Tensor, eps: float = 1e-5) -> Tensor:\n    \"\"\"Inverse function of sigmoid.\n\n    Args:\n        x (Tensor): The tensor to do the inverse.\n        eps (float): EPS avoid numerical overflow. Defaults 1e-5.\n    Returns:\n        Tensor: The x has passed the inverse function of sigmoid, has the same\n        shape with input.\n    \"\"\"\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1 / x2)\n\n\nclass AdaptivePadding(nn.Module):\n    \"\"\"Applies padding to input (if needed) so that input can get fully covered\n    by filter you specified. It support two modes \"same\" and \"corner\". The\n    \"same\" mode is same with \"SAME\" padding mode in TensorFlow, pad zero around\n    input. The \"corner\"  mode would pad zero to bottom right.\n\n    Args:\n        kernel_size (int | tuple): Size of the kernel:\n        stride (int | tuple): Stride of the filter. Default: 1:\n        dilation (int | tuple): Spacing between kernel elements.\n            Default: 1\n        padding (str): Support \"same\" and \"corner\", \"corner\" mode\n            would pad zero to bottom right, and \"same\" mode would\n            pad zero around input. 
Default: \"corner\".\n    Example:\n        >>> kernel_size = 16\n        >>> stride = 16\n        >>> dilation = 1\n        >>> input = torch.rand(1, 1, 15, 17)\n        >>> adap_pad = AdaptivePadding(\n        >>>     kernel_size=kernel_size,\n        >>>     stride=stride,\n        >>>     dilation=dilation,\n        >>>     padding=\"corner\")\n        >>> out = adap_pad(input)\n        >>> assert (out.shape[2], out.shape[3]) == (16, 32)\n        >>> input = torch.rand(1, 1, 16, 17)\n        >>> out = adap_pad(input)\n        >>> assert (out.shape[2], out.shape[3]) == (16, 32)\n    \"\"\"\n\n    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):\n\n        super(AdaptivePadding, self).__init__()\n\n        assert padding in ('same', 'corner')\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        padding = to_2tuple(padding)\n        dilation = to_2tuple(dilation)\n\n        self.padding = padding\n        self.kernel_size = kernel_size\n        self.stride = stride\n        self.dilation = dilation\n\n    def get_pad_shape(self, input_shape):\n        input_h, input_w = input_shape\n        kernel_h, kernel_w = self.kernel_size\n        stride_h, stride_w = self.stride\n        output_h = math.ceil(input_h / stride_h)\n        output_w = math.ceil(input_w / stride_w)\n        pad_h = max((output_h - 1) * stride_h +\n                    (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)\n        pad_w = max((output_w - 1) * stride_w +\n                    (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)\n        return pad_h, pad_w\n\n    def forward(self, x):\n        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])\n        if pad_h > 0 or pad_w > 0:\n            if self.padding == 'corner':\n                x = F.pad(x, [0, pad_w, 0, pad_h])\n            elif self.padding == 'same':\n                x = F.pad(x, [\n                    pad_w // 2, pad_w - pad_w // 2, pad_h // 2,\n                    pad_h - pad_h // 2\n                ])\n        return x\n\n\nclass PatchEmbed(BaseModule):\n    \"\"\"Image to Patch Embedding.\n\n    We use a conv layer to implement PatchEmbed.\n\n    Args:\n        in_channels (int): The num of input channels. Default: 3\n        embed_dims (int): The dimensions of embedding. Default: 768\n        conv_type (str): The config dict for embedding\n            conv layer type selection. Default: \"Conv2d.\n        kernel_size (int): The kernel_size of embedding conv. Default: 16.\n        stride (int): The slide stride of embedding conv.\n            Default: None (Would be set as `kernel_size`).\n        padding (int | tuple | string ): The padding length of\n            embedding conv. When it is a string, it means the mode\n            of adaptive padding, support \"same\" and \"corner\" now.\n            Default: \"corner\".\n        dilation (int): The dilation rate of embedding conv. Default: 1.\n        bias (bool): Bias of embed conv. Default: True.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: None.\n        input_size (int | tuple | None): The size of input, which will be\n            used to calculate the out size. Only work when `dynamic_size`\n            is False. Default: None.\n        init_cfg (`mmengine.ConfigDict`, optional): The Config for\n            initialization. 
Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int = 3,\n                 embed_dims: int = 768,\n                 conv_type: str = 'Conv2d',\n                 kernel_size: int = 16,\n                 stride: int = 16,\n                 padding: Union[int, tuple, str] = 'corner',\n                 dilation: int = 1,\n                 bias: bool = True,\n                 norm_cfg: OptConfigType = None,\n                 input_size: Union[int, tuple] = None,\n                 init_cfg: OptConfigType = None) -> None:\n        super(PatchEmbed, self).__init__(init_cfg=init_cfg)\n\n        self.embed_dims = embed_dims\n        if stride is None:\n            stride = kernel_size\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        dilation = to_2tuple(dilation)\n\n        if isinstance(padding, str):\n            self.adap_padding = AdaptivePadding(\n                kernel_size=kernel_size,\n                stride=stride,\n                dilation=dilation,\n                padding=padding)\n            # disable the padding of conv\n            padding = 0\n        else:\n            self.adap_padding = None\n        padding = to_2tuple(padding)\n\n        self.projection = build_conv_layer(\n            dict(type=conv_type),\n            in_channels=in_channels,\n            out_channels=embed_dims,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        if norm_cfg is not None:\n            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n        else:\n            self.norm = None\n\n        if input_size:\n            input_size = to_2tuple(input_size)\n            # `init_out_size` would be used outside to\n            # calculate the num_patches\n            # when `use_abs_pos_embed` outside\n            self.init_input_size = input_size\n            if self.adap_padding:\n                pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)\n                input_h, input_w = input_size\n                input_h = input_h + pad_h\n                input_w = input_w + pad_w\n                input_size = (input_h, input_w)\n\n            # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n            h_out = (input_size[0] + 2 * padding[0] - dilation[0] *\n                     (kernel_size[0] - 1) - 1) // stride[0] + 1\n            w_out = (input_size[1] + 2 * padding[1] - dilation[1] *\n                     (kernel_size[1] - 1) - 1) // stride[1] + 1\n            self.init_out_size = (h_out, w_out)\n        else:\n            self.init_input_size = None\n            self.init_out_size = None\n\n    def forward(self, x: Tensor) -> Tuple[Tensor, Tuple[int]]:\n        \"\"\"\n        Args:\n            x (Tensor): Has shape (B, C, H, W). 
In most cases, C is 3.\n\n        Returns:\n            tuple: Contains merged results and its spatial shape.\n\n                - x (Tensor): Has shape (B, out_h * out_w, embed_dims)\n                - out_size (tuple[int]): Spatial shape of x, arranged as\n                    (out_h, out_w).\n        \"\"\"\n\n        if self.adap_padding:\n            x = self.adap_padding(x)\n\n        x = self.projection(x)\n        out_size = (x.shape[2], x.shape[3])\n        x = x.flatten(2).transpose(1, 2)\n        if self.norm is not None:\n            x = self.norm(x)\n        return x, out_size\n\n\nclass PatchMerging(BaseModule):\n    \"\"\"Merge patch feature map.\n\n    This layer groups the feature map by kernel_size, and applies norm and\n    linear layers to the grouped feature map. Our implementation uses\n    `nn.Unfold` to merge patches, which is about 25% faster than the original\n    implementation. However, we need to modify pretrained models for\n    compatibility.\n\n    Args:\n        in_channels (int): The num of input channels.\n        out_channels (int): The num of output channels.\n        kernel_size (int | tuple, optional): the kernel size in the unfold\n            layer. Defaults to 2.\n        stride (int | tuple, optional): the stride of the sliding blocks in the\n            unfold layer. Default: None. (Would be set as `kernel_size`)\n        padding (int | tuple | string): The padding length of\n            the unfold layer. When it is a string, it means the mode\n            of adaptive padding, supporting \"same\" and \"corner\" now.\n            Default: \"corner\".\n        dilation (int | tuple, optional): dilation parameter in the unfold\n            layer. 
Default: 1.\n        bias (bool, optional): Whether to add bias in linear layer or not.\n            Defaults: False.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: dict(type='LN').\n        init_cfg (dict, optional): The extra config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 kernel_size: Optional[Union[int, tuple]] = 2,\n                 stride: Optional[Union[int, tuple]] = None,\n                 padding: Union[int, tuple, str] = 'corner',\n                 dilation: Optional[Union[int, tuple]] = 1,\n                 bias: Optional[bool] = False,\n                 norm_cfg: OptConfigType = dict(type='LN'),\n                 init_cfg: OptConfigType = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        if stride:\n            stride = stride\n        else:\n            stride = kernel_size\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        dilation = to_2tuple(dilation)\n\n        if isinstance(padding, str):\n            self.adap_padding = AdaptivePadding(\n                kernel_size=kernel_size,\n                stride=stride,\n                dilation=dilation,\n                padding=padding)\n            # disable the padding of unfold\n            padding = 0\n        else:\n            self.adap_padding = None\n\n        padding = to_2tuple(padding)\n        self.sampler = nn.Unfold(\n            kernel_size=kernel_size,\n            dilation=dilation,\n            padding=padding,\n            stride=stride)\n\n        sample_dim = kernel_size[0] * kernel_size[1] * in_channels\n\n        if norm_cfg is not None:\n            self.norm = build_norm_layer(norm_cfg, sample_dim)[1]\n        else:\n            self.norm = None\n\n        self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)\n\n    def forward(self, x: Tensor,\n                input_size: Tuple[int]) -> Tuple[Tensor, Tuple[int]]:\n        \"\"\"\n        Args:\n            x (Tensor): Has shape (B, H*W, C_in).\n            input_size (tuple[int]): The spatial shape of x, arrange as (H, W).\n                Default: None.\n\n        Returns:\n            tuple: Contains merged results and its spatial shape.\n\n                - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)\n                - out_size (tuple[int]): Spatial shape of x, arrange as\n                    (Merged_H, Merged_W).\n        \"\"\"\n        B, L, C = x.shape\n        assert isinstance(input_size, Sequence), f'Expect ' \\\n                                                 f'input_size is ' \\\n                                                 f'`Sequence` ' \\\n                                                 f'but get {input_size}'\n\n        H, W = input_size\n        assert L == H * W, 'input feature has wrong size'\n\n        x = x.view(B, H, W, C).permute([0, 3, 1, 2])  # B, C, H, W\n        # Use nn.Unfold to merge patch. 
About 25% faster than original method,\n        # but need to modify pretrained model for compatibility\n\n        if self.adap_padding:\n            x = self.adap_padding(x)\n            H, W = x.shape[-2:]\n\n        x = self.sampler(x)\n        # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2)\n\n        out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *\n                 (self.sampler.kernel_size[0] - 1) -\n                 1) // self.sampler.stride[0] + 1\n        out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *\n                 (self.sampler.kernel_size[1] - 1) -\n                 1) // self.sampler.stride[1] + 1\n\n        output_size = (out_h, out_w)\n        x = x.transpose(1, 2)  # B, H/2*W/2, 4*C\n        x = self.norm(x) if self.norm else x\n        x = self.reduction(x)\n        return x, output_size\n\n\nclass ConditionalAttention(BaseModule):\n    \"\"\"A wrapper of conditional attention, dropout and residual connection.\n\n    Args:\n        embed_dims (int): The embedding dimension.\n        num_heads (int): Parallel attention heads.\n        attn_drop (float): A Dropout layer on attn_output_weights.\n            Default: 0.0.\n        proj_drop: A Dropout layer after `nn.MultiheadAttention`.\n            Default: 0.0.\n        cross_attn (bool): Whether the attention module is for cross attention.\n            Default: False\n        keep_query_pos (bool): Whether to transform query_pos before cross\n            attention.\n            Default: False.\n        batch_first (bool): When it is True, Key, Query and Value are shape of\n            (batch, n, embed_dim), otherwise (n, batch, embed_dim).\n             Default: True.\n        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims: int,\n                 num_heads: int,\n                 attn_drop: float = 0.,\n                 proj_drop: float = 0.,\n                 cross_attn: bool = False,\n                 keep_query_pos: bool = False,\n                 batch_first: bool = True,\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(init_cfg=init_cfg)\n\n        assert batch_first is True, 'Set `batch_first`\\\n        to False is NOT supported in ConditionalAttention. 
\\\n        First dimension of all DETRs in mmdet is `batch`, \\\n        please set `batch_first` to True.'\n\n        self.cross_attn = cross_attn\n        self.keep_query_pos = keep_query_pos\n        self.embed_dims = embed_dims\n        self.num_heads = num_heads\n        self.attn_drop = Dropout(attn_drop)\n        self.proj_drop = Dropout(proj_drop)\n\n        self._init_layers()\n\n    def _init_layers(self):\n        \"\"\"Initialize layers for qkv projection.\"\"\"\n        embed_dims = self.embed_dims\n        self.qcontent_proj = Linear(embed_dims, embed_dims)\n        self.qpos_proj = Linear(embed_dims, embed_dims)\n        self.kcontent_proj = Linear(embed_dims, embed_dims)\n        self.kpos_proj = Linear(embed_dims, embed_dims)\n        self.v_proj = Linear(embed_dims, embed_dims)\n        if self.cross_attn:\n            self.qpos_sine_proj = Linear(embed_dims, embed_dims)\n        self.out_proj = Linear(embed_dims, embed_dims)\n\n        nn.init.constant_(self.out_proj.bias, 0.)\n\n    def forward_attn(self,\n                     query: Tensor,\n                     key: Tensor,\n                     value: Tensor,\n                     attn_mask: Tensor = None,\n                     key_padding_mask: Tensor = None) -> Tuple[Tensor]:\n        \"\"\"Forward process for `ConditionalAttention`.\n\n        Args:\n            query (Tensor): The input query with shape [bs, num_queries,\n                embed_dims].\n            key (Tensor): The key tensor with shape [bs, num_keys,\n                embed_dims].\n                If None, the `query` will be used. Defaults to None.\n            value (Tensor): The value tensor with same shape as `key`.\n                Same in `nn.MultiheadAttention.forward`. Defaults to None.\n                If None, the `key` will be used.\n            attn_mask (Tensor): ByteTensor mask with shape [num_queries,\n                num_keys]. Same in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].\n                Defaults to None.\n        Returns:\n            Tuple[Tensor]: Attention outputs of shape :math:`(N, L, E)`,\n            where :math:`N` is the batch size, :math:`L` is the target\n            sequence length , and :math:`E` is the embedding dimension\n            `embed_dim`. Attention weights per head of shape :math:`\n            (num_heads, L, S)`. 
where :math:`N` is batch size, :math:`L`\n            is target sequence length, and :math:`S` is the source sequence\n            length.\n        \"\"\"\n        assert key.size(1) == value.size(1), \\\n            f'{\"key, value must have the same sequence length\"}'\n        assert query.size(0) == key.size(0) == value.size(0), \\\n            f'{\"batch size must be equal for query, key, value\"}'\n        assert query.size(2) == key.size(2), \\\n            f'{\"q_dims, k_dims must be equal\"}'\n        assert value.size(2) == self.embed_dims, \\\n            f'{\"v_dims must be equal to embed_dims\"}'\n\n        bs, tgt_len, hidden_dims = query.size()\n        _, src_len, _ = key.size()\n        head_dims = hidden_dims // self.num_heads\n        v_head_dims = self.embed_dims // self.num_heads\n        assert head_dims * self.num_heads == hidden_dims, \\\n            f'{\"hidden_dims must be divisible by num_heads\"}'\n        scaling = float(head_dims)**-0.5\n\n        q = query * scaling\n        k = key\n        v = value\n\n        if attn_mask is not None:\n            assert attn_mask.dtype == torch.float32 or \\\n                   attn_mask.dtype == torch.float64 or \\\n                   attn_mask.dtype == torch.float16 or \\\n                   attn_mask.dtype == torch.uint8 or \\\n                   attn_mask.dtype == torch.bool, \\\n                   'Only float, byte, and bool types are supported for \\\n                    attn_mask'\n\n            if attn_mask.dtype == torch.uint8:\n                warnings.warn('Byte tensor for attn_mask is deprecated.\\\n                     Use bool tensor instead.')\n                attn_mask = attn_mask.to(torch.bool)\n            if attn_mask.dim() == 2:\n                attn_mask = attn_mask.unsqueeze(0)\n                if list(attn_mask.size()) != [1, query.size(1), key.size(1)]:\n                    raise RuntimeError(\n                        'The size of the 2D attn_mask is not correct.')\n            elif attn_mask.dim() == 3:\n                if list(attn_mask.size()) != [\n                        bs * self.num_heads,\n                        query.size(1),\n                        key.size(1)\n                ]:\n                    raise RuntimeError(\n                        'The size of the 3D attn_mask is not correct.')\n            else:\n                raise RuntimeError(\n                    \"attn_mask's dimension {} is not supported\".format(\n                        attn_mask.dim()))\n        # attn_mask's dim is 3 now.\n\n        if key_padding_mask is not None and key_padding_mask.dtype == int:\n            key_padding_mask = key_padding_mask.to(torch.bool)\n\n        q = q.contiguous().view(bs, tgt_len, self.num_heads,\n                                head_dims).permute(0, 2, 1, 3).flatten(0, 1)\n        if k is not None:\n            k = k.contiguous().view(bs, src_len, self.num_heads,\n                                    head_dims).permute(0, 2, 1,\n                                                       3).flatten(0, 1)\n        if v is not None:\n            v = v.contiguous().view(bs, src_len, self.num_heads,\n                                    v_head_dims).permute(0, 2, 1,\n                                                         3).flatten(0, 1)\n\n        if key_padding_mask is not None:\n            assert key_padding_mask.size(0) == bs\n            assert key_padding_mask.size(1) == src_len\n\n        attn_output_weights = torch.bmm(q, k.transpose(1, 2))\n        assert 
list(attn_output_weights.size()) == [\n            bs * self.num_heads, tgt_len, src_len\n        ]\n\n        if attn_mask is not None:\n            if attn_mask.dtype == torch.bool:\n                attn_output_weights.masked_fill_(attn_mask, float('-inf'))\n            else:\n                attn_output_weights += attn_mask\n\n        if key_padding_mask is not None:\n            attn_output_weights = attn_output_weights.view(\n                bs, self.num_heads, tgt_len, src_len)\n            attn_output_weights = attn_output_weights.masked_fill(\n                key_padding_mask.unsqueeze(1).unsqueeze(2),\n                float('-inf'),\n            )\n            attn_output_weights = attn_output_weights.view(\n                bs * self.num_heads, tgt_len, src_len)\n\n        attn_output_weights = F.softmax(\n            attn_output_weights -\n            attn_output_weights.max(dim=-1, keepdim=True)[0],\n            dim=-1)\n        attn_output_weights = self.attn_drop(attn_output_weights)\n\n        attn_output = torch.bmm(attn_output_weights, v)\n        assert list(\n            attn_output.size()) == [bs * self.num_heads, tgt_len, v_head_dims]\n        attn_output = attn_output.view(bs, self.num_heads, tgt_len,\n                                       v_head_dims).permute(0, 2, 1,\n                                                            3).flatten(2)\n        attn_output = self.out_proj(attn_output)\n\n        # average attention weights over heads\n        attn_output_weights = attn_output_weights.view(bs, self.num_heads,\n                                                       tgt_len, src_len)\n        return attn_output, attn_output_weights.sum(dim=1) / self.num_heads\n\n    def forward(self,\n                query: Tensor,\n                key: Tensor,\n                query_pos: Tensor = None,\n                ref_sine_embed: Tensor = None,\n                key_pos: Tensor = None,\n                attn_mask: Tensor = None,\n                key_padding_mask: Tensor = None,\n                is_first: bool = False) -> Tensor:\n        \"\"\"Forward function for `ConditionalAttention`.\n        Args:\n            query (Tensor): The input query with shape [bs, num_queries,\n                embed_dims].\n            key (Tensor): The key tensor with shape [bs, num_keys,\n                embed_dims].\n                If None, the `query` will be used. Defaults to None.\n            query_pos (Tensor): The positional encoding for query in self\n                attention, with the same shape as `x`. If not None, it will\n                be added to `x` before forward function.\n                Defaults to None.\n            query_sine_embed (Tensor): The positional encoding for query in\n                cross attention, with the same shape as `x`. If not None, it\n                will be added to `x` before forward function.\n                Defaults to None.\n            key_pos (Tensor): The positional encoding for `key`, with the\n                same shape as `key`. Defaults to None. If not None, it will\n                be added to `key` before forward function. If None, and\n                `query_pos` has the same shape as `key`, then `query_pos`\n                will be used for `key_pos`. Defaults to None.\n            attn_mask (Tensor): ByteTensor mask with shape [num_queries,\n                num_keys]. 
Same in `nn.MultiheadAttention.forward`.\n                Defaults to None.\n            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].\n                Defaults to None.\n            is_first (bool): A indicator to tell whether the current layer\n                is the first layer of the decoder.\n                Defaults to False.\n        Returns:\n            Tensor: forwarded results with shape\n            [bs, num_queries, embed_dims].\n        \"\"\"\n\n        if self.cross_attn:\n            q_content = self.qcontent_proj(query)\n            k_content = self.kcontent_proj(key)\n            v = self.v_proj(key)\n\n            bs, nq, c = q_content.size()\n            _, hw, _ = k_content.size()\n\n            k_pos = self.kpos_proj(key_pos)\n            if is_first or self.keep_query_pos:\n                q_pos = self.qpos_proj(query_pos)\n                q = q_content + q_pos\n                k = k_content + k_pos\n            else:\n                q = q_content\n                k = k_content\n            q = q.view(bs, nq, self.num_heads, c // self.num_heads)\n            query_sine_embed = self.qpos_sine_proj(ref_sine_embed)\n            query_sine_embed = query_sine_embed.view(bs, nq, self.num_heads,\n                                                     c // self.num_heads)\n            q = torch.cat([q, query_sine_embed], dim=3).view(bs, nq, 2 * c)\n            k = k.view(bs, hw, self.num_heads, c // self.num_heads)\n            k_pos = k_pos.view(bs, hw, self.num_heads, c // self.num_heads)\n            k = torch.cat([k, k_pos], dim=3).view(bs, hw, 2 * c)\n            ca_output = self.forward_attn(\n                query=q,\n                key=k,\n                value=v,\n                attn_mask=attn_mask,\n                key_padding_mask=key_padding_mask)[0]\n            query = query + self.proj_drop(ca_output)\n        else:\n            q_content = self.qcontent_proj(query)\n            q_pos = self.qpos_proj(query_pos)\n            k_content = self.kcontent_proj(query)\n            k_pos = self.kpos_proj(query_pos)\n            v = self.v_proj(query)\n            q = q_content if q_pos is None else q_content + q_pos\n            k = k_content if k_pos is None else k_content + k_pos\n            sa_output = self.forward_attn(\n                query=q,\n                key=k,\n                value=v,\n                attn_mask=attn_mask,\n                key_padding_mask=key_padding_mask)[0]\n            query = query + self.proj_drop(sa_output)\n\n        return query\n\n\nclass MLP(BaseModule):\n    \"\"\"Very simple multi-layer perceptron (also called FFN) with relu. Mostly\n    used in DETR series detectors.\n\n    Args:\n        input_dim (int): Feature dim of the input tensor.\n        hidden_dim (int): Feature dim of the hidden layer.\n        output_dim (int): Feature dim of the output tensor.\n        num_layers (int): Number of FFN layers. 
Note that the last\n            layer of the MLP is a plain Linear layer without activation.\n    \"\"\"\n\n    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int,\n                 num_layers: int) -> None:\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        self.layers = ModuleList(\n            Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function of MLP.\n\n        Args:\n            x (Tensor): The input feature, has shape\n                (num_queries, bs, input_dim).\n        Returns:\n            Tensor: The output feature, has shape\n                (num_queries, bs, output_dim).\n        \"\"\"\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\n\n@MODELS.register_module()\nclass DynamicConv(BaseModule):\n    \"\"\"Implements Dynamic Convolution.\n\n    This module generates parameters for each sample and\n    uses bmm to implement 1x1 convolution. Code is modified\n    from the `official github repo <https://github.com/PeizeSun/\n    SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ .\n\n    Args:\n        in_channels (int): The input feature channel.\n            Defaults to 256.\n        feat_channels (int): The inner feature channel.\n            Defaults to 64.\n        out_channels (int, optional): The output feature channel.\n            When not specified, it will be set to `in_channels`\n            by default.\n        input_feat_shape (int): The shape of input feature.\n            Defaults to 7.\n        with_proj (bool): Project the two-dimensional feature to\n            a one-dimensional feature. Defaults to True.\n        act_cfg (dict): The activation config for DynamicConv.\n        norm_cfg (dict): Config dict for normalization layer. 
Default\n            layer normalization.\n        init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int = 256,\n                 feat_channels: int = 64,\n                 out_channels: Optional[int] = None,\n                 input_feat_shape: int = 7,\n                 with_proj: bool = True,\n                 act_cfg: OptConfigType = dict(type='ReLU', inplace=True),\n                 norm_cfg: OptConfigType = dict(type='LN'),\n                 init_cfg: OptConfigType = None) -> None:\n        super(DynamicConv, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.out_channels_raw = out_channels\n        self.input_feat_shape = input_feat_shape\n        self.with_proj = with_proj\n        self.act_cfg = act_cfg\n        self.norm_cfg = norm_cfg\n        self.out_channels = out_channels if out_channels else in_channels\n\n        self.num_params_in = self.in_channels * self.feat_channels\n        self.num_params_out = self.out_channels * self.feat_channels\n        self.dynamic_layer = nn.Linear(\n            self.in_channels, self.num_params_in + self.num_params_out)\n\n        self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n        self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n        self.activation = build_activation_layer(act_cfg)\n\n        num_output = self.out_channels * input_feat_shape**2\n        if self.with_proj:\n            self.fc_layer = nn.Linear(num_output, self.out_channels)\n            self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n    def forward(self, param_feature: Tensor, input_feature: Tensor) -> Tensor:\n        \"\"\"Forward function for `DynamicConv`.\n\n        Args:\n            param_feature (Tensor): The feature can be used\n                to generate the parameter, has shape\n                (num_all_proposals, in_channels).\n            input_feature (Tensor): Feature that\n                interact with parameters, has shape\n                (num_all_proposals, in_channels, H, W).\n\n        Returns:\n            Tensor: The output feature has shape\n            (num_all_proposals, out_channels).\n        \"\"\"\n        input_feature = input_feature.flatten(2).permute(2, 0, 1)\n\n        input_feature = input_feature.permute(1, 0, 2)\n        parameters = self.dynamic_layer(param_feature)\n\n        param_in = parameters[:, :self.num_params_in].view(\n            -1, self.in_channels, self.feat_channels)\n        param_out = parameters[:, -self.num_params_out:].view(\n            -1, self.feat_channels, self.out_channels)\n\n        # input_feature has shape (num_all_proposals, H*W, in_channels)\n        # param_in has shape (num_all_proposals, in_channels, feat_channels)\n        # feature has shape (num_all_proposals, H*W, feat_channels)\n        features = torch.bmm(input_feature, param_in)\n        features = self.norm_in(features)\n        features = self.activation(features)\n\n        # param_out has shape (batch_size, feat_channels, out_channels)\n        features = torch.bmm(features, param_out)\n        features = self.norm_out(features)\n        features = self.activation(features)\n\n        if self.with_proj:\n            features = features.flatten(1)\n            features = self.fc_layer(features)\n            features = self.fc_norm(features)\n            features = 
self.activation(features)\n\n        return features\n"
  },
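The tensor bookkeeping in `DynamicConv.forward` above is easiest to follow with concrete shapes. Below is a minimal standalone sketch (plain PyTorch with illustrative sizes; normalization, activation and the optional projection are omitted, so this is not the registered mmdet module itself) of the documented per-proposal `bmm` interaction that plays the role of a 1x1 convolution.

import torch

# illustrative sizes matching the documented defaults
num_proposals, in_channels, feat_channels, out_channels = 4, 256, 64, 256
H = W = 7

param_feature = torch.randn(num_proposals, in_channels)
input_feature = torch.randn(num_proposals, in_channels, H, W)

# stand-in for self.dynamic_layer: one Linear producing both parameter blocks
num_params_in = in_channels * feat_channels
num_params_out = out_channels * feat_channels
dynamic_layer = torch.nn.Linear(in_channels, num_params_in + num_params_out)

parameters = dynamic_layer(param_feature)
param_in = parameters[:, :num_params_in].view(-1, in_channels, feat_channels)
param_out = parameters[:, -num_params_out:].view(-1, feat_channels, out_channels)

# (N, in_channels, H, W) -> (N, H*W, in_channels)
x = input_feature.flatten(2).permute(0, 2, 1)
x = torch.bmm(x, param_in)   # (N, H*W, feat_channels)
x = torch.bmm(x, param_out)  # (N, H*W, out_channels)
assert x.shape == (num_proposals, H * W, out_channels)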
  {
    "path": "mmdet/models/losses/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .accuracy import Accuracy, accuracy\nfrom .ae_loss import AssociativeEmbeddingLoss\nfrom .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss\nfrom .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,\n                                 cross_entropy, mask_cross_entropy)\nfrom .dice_loss import DiceLoss\nfrom .focal_loss import FocalLoss, sigmoid_focal_loss\nfrom .gaussian_focal_loss import GaussianFocalLoss\nfrom .gfocal_loss import DistributionFocalLoss, QualityFocalLoss\nfrom .ghm_loss import GHMC, GHMR\nfrom .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,\n                       IoULoss, bounded_iou_loss, iou_loss)\nfrom .kd_loss import KnowledgeDistillationKLDivLoss, KDQualityFocalLoss\nfrom .mse_loss import MSELoss, mse_loss\nfrom .pisa_loss import carl_loss, isr_p\nfrom .seesaw_loss import SeesawLoss\nfrom .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss\nfrom .utils import reduce_loss, weight_reduce_loss, weighted_loss\nfrom .varifocal_loss import VarifocalLoss\nfrom .pkd_loss import PKDLoss\n\n__all__ = [\n    'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',\n    'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',\n    'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',\n    'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',\n    'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',\n    'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',\n    'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',\n    'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',\n    'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',\n    'SeesawLoss', 'DiceLoss', 'KDQualityFocalLoss', 'PKDLoss'\n]\n"
  },
  {
    "path": "mmdet/models/losses/accuracy.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\n\n\ndef accuracy(pred, target, topk=1, thresh=None):\n    \"\"\"Calculate accuracy according to the prediction and target.\n\n    Args:\n        pred (torch.Tensor): The model prediction, shape (N, num_class)\n        target (torch.Tensor): The target of each prediction, shape (N, )\n        topk (int | tuple[int], optional): If the predictions in ``topk``\n            matches the target, the predictions will be regarded as\n            correct ones. Defaults to 1.\n        thresh (float, optional): If not None, predictions with scores under\n            this threshold are considered incorrect. Default to None.\n\n    Returns:\n        float | tuple[float]: If the input ``topk`` is a single integer,\n            the function will return a single float as accuracy. If\n            ``topk`` is a tuple containing multiple integers, the\n            function will return a tuple containing accuracies of\n            each ``topk`` number.\n    \"\"\"\n    assert isinstance(topk, (int, tuple))\n    if isinstance(topk, int):\n        topk = (topk, )\n        return_single = True\n    else:\n        return_single = False\n\n    maxk = max(topk)\n    if pred.size(0) == 0:\n        accu = [pred.new_tensor(0.) for i in range(len(topk))]\n        return accu[0] if return_single else accu\n    assert pred.ndim == 2 and target.ndim == 1\n    assert pred.size(0) == target.size(0)\n    assert maxk <= pred.size(1), \\\n        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'\n    pred_value, pred_label = pred.topk(maxk, dim=1)\n    pred_label = pred_label.t()  # transpose to shape (maxk, N)\n    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))\n    if thresh is not None:\n        # Only prediction values larger than thresh are counted as correct\n        correct = correct & (pred_value > thresh).t()\n    res = []\n    for k in topk:\n        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n        res.append(correct_k.mul_(100.0 / pred.size(0)))\n    return res[0] if return_single else res\n\n\nclass Accuracy(nn.Module):\n\n    def __init__(self, topk=(1, ), thresh=None):\n        \"\"\"Module to calculate the accuracy.\n\n        Args:\n            topk (tuple, optional): The criterion used to calculate the\n                accuracy. Defaults to (1,).\n            thresh (float, optional): If not None, predictions with scores\n                under this threshold are considered incorrect. Default to None.\n        \"\"\"\n        super().__init__()\n        self.topk = topk\n        self.thresh = thresh\n\n    def forward(self, pred, target):\n        \"\"\"Forward function to calculate accuracy.\n\n        Args:\n            pred (torch.Tensor): Prediction of models.\n            target (torch.Tensor): Target for each prediction.\n\n        Returns:\n            tuple[float]: The accuracies under different topk criterions.\n        \"\"\"\n        return accuracy(pred, target, self.topk, self.thresh)\n"
  },
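A small usage sketch for the `accuracy` helper above, assuming mmdet is importable; the tensor values are purely illustrative. With `topk=(1, 2)` the function returns one accuracy per requested k.

import torch
from mmdet.models.losses import accuracy

# two samples, three classes: only the first sample is correct at top-1,
# while both are covered within the top-2 predictions
pred = torch.tensor([[0.1, 0.7, 0.2],
                     [0.6, 0.3, 0.1]])
target = torch.tensor([1, 1])

top1, top2 = accuracy(pred, target, topk=(1, 2))
print(top1)  # tensor([50.])
print(top2)  # tensor([100.])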
  {
    "path": "mmdet/models/losses/ae_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\n\n\ndef ae_loss_per_image(tl_preds, br_preds, match):\n    \"\"\"Associative Embedding Loss in one image.\n\n    Associative Embedding Loss including two parts: pull loss and push loss.\n    Pull loss makes embedding vectors from same object closer to each other.\n    Push loss distinguish embedding vector from different objects, and makes\n        the gap between them is large enough.\n\n    During computing, usually there are 3 cases:\n        - no object in image: both pull loss and push loss will be 0.\n        - one object in image: push loss will be 0 and pull loss is computed\n            by the two corner of the only object.\n        - more than one objects in image: pull loss is computed by corner pairs\n            from each object, push loss is computed by each object with all\n            other objects. We use confusion matrix with 0 in diagonal to\n            compute the push loss.\n\n    Args:\n        tl_preds (tensor): Embedding feature map of left-top corner.\n        br_preds (tensor): Embedding feature map of bottim-right corner.\n        match (list): Downsampled coordinates pair of each ground truth box.\n    \"\"\"\n\n    tl_list, br_list, me_list = [], [], []\n    if len(match) == 0:  # no object in image\n        pull_loss = tl_preds.sum() * 0.\n        push_loss = tl_preds.sum() * 0.\n    else:\n        for m in match:\n            [tl_y, tl_x], [br_y, br_x] = m\n            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)\n            br_e = br_preds[:, br_y, br_x].view(-1, 1)\n            tl_list.append(tl_e)\n            br_list.append(br_e)\n            me_list.append((tl_e + br_e) / 2.0)\n\n        tl_list = torch.cat(tl_list)\n        br_list = torch.cat(br_list)\n        me_list = torch.cat(me_list)\n\n        assert tl_list.size() == br_list.size()\n\n        # N is object number in image, M is dimension of embedding vector\n        N, M = tl_list.size()\n\n        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)\n        pull_loss = pull_loss.sum() / N\n\n        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper\n\n        # confusion matrix of push loss\n        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list\n        conf_weight = 1 - torch.eye(N).type_as(me_list)\n        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())\n\n        if N > 1:  # more than one object in current image\n            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))\n        else:\n            push_loss = tl_preds.sum() * 0.\n\n    return pull_loss, push_loss\n\n\n@MODELS.register_module()\nclass AssociativeEmbeddingLoss(nn.Module):\n    \"\"\"Associative Embedding Loss.\n\n    More details can be found in\n    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and\n    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .\n    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501\n\n    Args:\n        pull_weight (float): Loss weight for corners from same object.\n        push_weight (float): Loss weight for corners from different object.\n    \"\"\"\n\n    def __init__(self, pull_weight=0.25, push_weight=0.25):\n        super(AssociativeEmbeddingLoss, self).__init__()\n        self.pull_weight = pull_weight\n        self.push_weight = push_weight\n\n    
def forward(self, pred, target, match):\n        \"\"\"Forward function.\"\"\"\n        batch = pred.size(0)\n        pull_all, push_all = 0.0, 0.0\n        for i in range(batch):\n            pull, push = ae_loss_per_image(pred[i], target[i], match[i])\n\n            pull_all += self.pull_weight * pull\n            push_all += self.push_weight * push\n\n        return pull_all, push_all\n"
  },
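A toy sketch of `ae_loss_per_image` above, assuming this repo is importable (the helper is module-level and not re-exported from `mmdet.models.losses`); the embedding values are illustrative. Identical corner embeddings per object give zero pull loss, and object means separated by more than the margin of 1 give zero push loss.

import torch
from mmdet.models.losses.ae_loss import ae_loss_per_image

# one embedding channel over a 4x4 feature map, two ground-truth boxes
tl_preds = torch.zeros(1, 4, 4)
br_preds = torch.zeros(1, 4, 4)
tl_preds[0, 0, 0] = 1.0  # object 1: top-left corner embedding
br_preds[0, 1, 1] = 1.0  # object 1: bottom-right corner embedding
tl_preds[0, 2, 2] = 3.0  # object 2: top-left corner embedding
br_preds[0, 3, 3] = 3.0  # object 2: bottom-right corner embedding

# [[tl_y, tl_x], [br_y, br_x]] per ground-truth box
match = [[[0, 0], [1, 1]], [[2, 2], [3, 3]]]
pull, push = ae_loss_per_image(tl_preds, br_preds, match)
print(pull.item(), push.item())  # 0.0 0.0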
  {
    "path": "mmdet/models/losses/balanced_l1_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom mmdet.registry import MODELS\nfrom .utils import weighted_loss\n\n\n@weighted_loss\ndef balanced_l1_loss(pred,\n                     target,\n                     beta=1.0,\n                     alpha=0.5,\n                     gamma=1.5,\n                     reduction='mean'):\n    \"\"\"Calculate balanced L1 loss.\n\n    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, 4).\n        target (torch.Tensor): The learning target of the prediction with\n            shape (N, 4).\n        beta (float): The loss is a piecewise function of prediction and target\n            and ``beta`` serves as a threshold for the difference between the\n            prediction and target. Defaults to 1.0.\n        alpha (float): The denominator ``alpha`` in the balanced L1 loss.\n            Defaults to 0.5.\n        gamma (float): The ``gamma`` in the balanced L1 loss.\n            Defaults to 1.5.\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n\n    Returns:\n        torch.Tensor: The calculated loss\n    \"\"\"\n    assert beta > 0\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n\n    diff = torch.abs(pred - target)\n    b = np.e**(gamma / alpha) - 1\n    loss = torch.where(\n        diff < beta, alpha / b *\n        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,\n        gamma * diff + gamma / b - alpha * beta)\n\n    return loss\n\n\n@MODELS.register_module()\nclass BalancedL1Loss(nn.Module):\n    \"\"\"Balanced L1 Loss.\n\n    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n    Args:\n        alpha (float): The denominator ``alpha`` in the balanced L1 loss.\n            Defaults to 0.5.\n        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.\n        beta (float, optional): The loss is a piecewise function of prediction\n            and target. ``beta`` serves as a threshold for the difference\n            between the prediction and target. Defaults to 1.0.\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n    \"\"\"\n\n    def __init__(self,\n                 alpha=0.5,\n                 gamma=1.5,\n                 beta=1.0,\n                 reduction='mean',\n                 loss_weight=1.0):\n        super(BalancedL1Loss, self).__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Forward function of loss.\n\n        Args:\n            pred (torch.Tensor): The prediction with shape (N, 4).\n            target (torch.Tensor): The learning target of the prediction with\n                shape (N, 4).\n            weight (torch.Tensor, optional): Sample-wise loss weight with\n                shape (N, ).\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * balanced_l1_loss(\n            pred,\n            target,\n            weight,\n            alpha=self.alpha,\n            gamma=self.gamma,\n            beta=self.beta,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss_bbox\n"
  },
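A minimal usage sketch of the functional and module forms above, assuming mmdet is importable; the box values are illustrative and fall in the |diff| < beta (logarithmic) branch of the piecewise loss.

import torch
from mmdet.models.losses import BalancedL1Loss, balanced_l1_loss

pred = torch.tensor([[0.5, 0.5, 0.5, 0.5]])
target = torch.tensor([[0.0, 0.0, 0.0, 0.0]])

# functional form, mean-reduced over the four box coordinates
print(balanced_l1_loss(pred, target, beta=1.0))

# module form, as it would be built from a config dict
loss_bbox = BalancedL1Loss(alpha=0.5, gamma=1.5, beta=1.0, loss_weight=1.0)
print(loss_bbox(pred, target))  # same value as the functional call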
  {
    "path": "mmdet/models/losses/cross_entropy_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss\n\n\ndef cross_entropy(pred,\n                  label,\n                  weight=None,\n                  reduction='mean',\n                  avg_factor=None,\n                  class_weight=None,\n                  ignore_index=-100,\n                  avg_non_ignore=False):\n    \"\"\"Calculate the CrossEntropy loss.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the number\n            of classes.\n        label (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        reduction (str, optional): The method used to reduce the loss.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (int | None): The label index to be ignored.\n            If None, it will be set to default value. Default: -100.\n        avg_non_ignore (bool): The flag decides to whether the loss is\n            only averaged over non-ignored targets. Default: False.\n\n    Returns:\n        torch.Tensor: The calculated loss\n    \"\"\"\n    # The default value of ignore_index is the same as F.cross_entropy\n    ignore_index = -100 if ignore_index is None else ignore_index\n    # element-wise losses\n    loss = F.cross_entropy(\n        pred,\n        label,\n        weight=class_weight,\n        reduction='none',\n        ignore_index=ignore_index)\n\n    # average loss over non-ignored elements\n    # pytorch's official cross_entropy average loss over non-ignored elements\n    # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660  # noqa\n    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':\n        avg_factor = label.numel() - (label == ignore_index).sum().item()\n\n    # apply weights and do the reduction\n    if weight is not None:\n        weight = weight.float()\n    loss = weight_reduce_loss(\n        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n    return loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):\n    \"\"\"Expand onehot labels to match the size of prediction.\"\"\"\n    bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n    valid_mask = (labels >= 0) & (labels != ignore_index)\n    inds = torch.nonzero(\n        valid_mask & (labels < label_channels), as_tuple=False)\n\n    if inds.numel() > 0:\n        bin_labels[inds, labels[inds]] = 1\n\n    valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),\n                                               label_channels).float()\n    if label_weights is None:\n        bin_label_weights = valid_mask\n    else:\n        bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)\n        bin_label_weights *= valid_mask\n\n    return bin_labels, bin_label_weights, valid_mask\n\n\ndef binary_cross_entropy(pred,\n                         label,\n                         weight=None,\n                         reduction='mean',\n                         avg_factor=None,\n                         class_weight=None,\n                         ignore_index=-100,\n                         
avg_non_ignore=False):\n    \"\"\"Calculate the binary CrossEntropy loss.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, 1) or (N, ).\n            When the shape of pred is (N, 1), label will be expanded to\n            one-hot format, and when the shape of pred is (N, ), label\n            will not be expanded to one-hot format.\n        label (torch.Tensor): The learning label of the prediction,\n            with shape (N, ).\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        reduction (str, optional): The method used to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (int | None): The label index to be ignored.\n            If None, it will be set to default value. Default: -100.\n        avg_non_ignore (bool): The flag decides to whether the loss is\n            only averaged over non-ignored targets. Default: False.\n\n    Returns:\n        torch.Tensor: The calculated loss.\n    \"\"\"\n    # The default value of ignore_index is the same as F.cross_entropy\n    ignore_index = -100 if ignore_index is None else ignore_index\n\n    if pred.dim() != label.dim():\n        label, weight, valid_mask = _expand_onehot_labels(\n            label, weight, pred.size(-1), ignore_index)\n    else:\n        # should mask out the ignored elements\n        valid_mask = ((label >= 0) & (label != ignore_index)).float()\n        if weight is not None:\n            # The inplace writing method will have a mismatched broadcast\n            # shape error if the weight and valid_mask dimensions\n            # are inconsistent such as (B,N,1) and (B,N,C).\n            weight = weight * valid_mask\n        else:\n            weight = valid_mask\n\n    # average loss over non-ignored elements\n    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':\n        avg_factor = valid_mask.sum().item()\n\n    # weighted element-wise losses\n    weight = weight.float()\n    loss = F.binary_cross_entropy_with_logits(\n        pred, label.float(), pos_weight=class_weight, reduction='none')\n    # do the reduction for the weighted loss\n    loss = weight_reduce_loss(\n        loss, weight, reduction=reduction, avg_factor=avg_factor)\n\n    return loss\n\n\ndef mask_cross_entropy(pred,\n                       target,\n                       label,\n                       reduction='mean',\n                       avg_factor=None,\n                       class_weight=None,\n                       ignore_index=None,\n                       **kwargs):\n    \"\"\"Calculate the CrossEntropy loss for masks.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C, *), C is the\n            number of classes. The trailing * indicates arbitrary shape.\n        target (torch.Tensor): The learning label of the prediction.\n        label (torch.Tensor): ``label`` indicates the class label of the mask\n            corresponding object. This will be used to select the mask in the\n            of the class which the object belongs to when the mask prediction\n            if not class-agnostic.\n        reduction (str, optional): The method used to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. 
Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (None): Placeholder, to be consistent with other loss.\n            Default: None.\n\n    Returns:\n        torch.Tensor: The calculated loss\n\n    Example:\n        >>> N, C = 3, 11\n        >>> H, W = 2, 2\n        >>> pred = torch.randn(N, C, H, W) * 1000\n        >>> target = torch.rand(N, H, W)\n        >>> label = torch.randint(0, C, size=(N,))\n        >>> reduction = 'mean'\n        >>> avg_factor = None\n        >>> class_weights = None\n        >>> loss = mask_cross_entropy(pred, target, label, reduction,\n        >>>                           avg_factor, class_weights)\n        >>> assert loss.shape == (1,)\n    \"\"\"\n    assert ignore_index is None, 'BCE loss does not support ignore_index'\n    # TODO: handle these two reserved arguments\n    assert reduction == 'mean' and avg_factor is None\n    num_rois = pred.size()[0]\n    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)\n    pred_slice = pred[inds, label].squeeze(1)\n    return F.binary_cross_entropy_with_logits(\n        pred_slice, target, weight=class_weight, reduction='mean')[None]\n\n\n@MODELS.register_module()\nclass CrossEntropyLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=False,\n                 use_mask=False,\n                 reduction='mean',\n                 class_weight=None,\n                 ignore_index=None,\n                 loss_weight=1.0,\n                 avg_non_ignore=False):\n        \"\"\"CrossEntropyLoss.\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n                of softmax. Defaults to False.\n            use_mask (bool, optional): Whether to use mask cross entropy loss.\n                Defaults to False.\n            reduction (str, optional): . Defaults to 'mean'.\n                Options are \"none\", \"mean\" and \"sum\".\n            class_weight (list[float], optional): Weight of each class.\n                Defaults to None.\n            ignore_index (int | None): The label index to be ignored.\n                Defaults to None.\n            loss_weight (float, optional): Weight of the loss. Defaults to 1.0.\n            avg_non_ignore (bool): The flag decides to whether the loss is\n                only averaged over non-ignored targets. 
Default: False.\n        \"\"\"\n        super(CrossEntropyLoss, self).__init__()\n        assert (use_sigmoid is False) or (use_mask is False)\n        self.use_sigmoid = use_sigmoid\n        self.use_mask = use_mask\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.class_weight = class_weight\n        self.ignore_index = ignore_index\n        self.avg_non_ignore = avg_non_ignore\n        if ((ignore_index is not None) and not self.avg_non_ignore\n                and self.reduction == 'mean'):\n            warnings.warn(\n                'Default ``avg_non_ignore`` is False, if you would like to '\n                'ignore the certain label and average loss over non-ignore '\n                'labels, which is the same with PyTorch official '\n                'cross_entropy, set ``avg_non_ignore=True``.')\n\n        if self.use_sigmoid:\n            self.cls_criterion = binary_cross_entropy\n        elif self.use_mask:\n            self.cls_criterion = mask_cross_entropy\n        else:\n            self.cls_criterion = cross_entropy\n\n    def extra_repr(self):\n        \"\"\"Extra repr.\"\"\"\n        s = f'avg_non_ignore={self.avg_non_ignore}'\n        return s\n\n    def forward(self,\n                cls_score,\n                label,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                ignore_index=None,\n                **kwargs):\n        \"\"\"Forward function.\n\n        Args:\n            cls_score (torch.Tensor): The prediction.\n            label (torch.Tensor): The learning label of the prediction.\n            weight (torch.Tensor, optional): Sample-wise loss weight.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The method used to reduce the\n                loss. Options are \"none\", \"mean\" and \"sum\".\n            ignore_index (int | None): The label index to be ignored.\n                If not None, it will override the default value. Default: None.\n        Returns:\n            torch.Tensor: The calculated loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if ignore_index is None:\n            ignore_index = self.ignore_index\n\n        if self.class_weight is not None:\n            class_weight = cls_score.new_tensor(\n                self.class_weight, device=cls_score.device)\n        else:\n            class_weight = None\n        loss_cls = self.loss_weight * self.cls_criterion(\n            cls_score,\n            label,\n            weight,\n            class_weight=class_weight,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            ignore_index=ignore_index,\n            avg_non_ignore=self.avg_non_ignore,\n            **kwargs)\n        return loss_cls\n"
  },
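A small sketch of the `avg_non_ignore` behaviour documented above, assuming mmdet is importable. Labels equal to the ignore index contribute zero loss either way, but only `avg_non_ignore=True` removes them from the averaging denominator (matching `F.cross_entropy`).

import torch
from mmdet.models.losses import CrossEntropyLoss

logits = torch.randn(4, 3)
labels = torch.tensor([0, 2, 1, -100])  # the last sample is ignored

# averages over all 4 samples; emits a warning suggesting avg_non_ignore=True
ce = CrossEntropyLoss(ignore_index=-100)
# averages over the 3 non-ignored samples only
ce_non_ignore = CrossEntropyLoss(ignore_index=-100, avg_non_ignore=True)

print(ce(logits, labels), ce_non_ignore(logits, labels))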
  {
    "path": "mmdet/models/losses/dice_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss\n\n\ndef dice_loss(pred,\n              target,\n              weight=None,\n              eps=1e-3,\n              reduction='mean',\n              naive_dice=False,\n              avg_factor=None):\n    \"\"\"Calculate dice loss, there are two forms of dice loss is supported:\n\n        - the one proposed in `V-Net: Fully Convolutional Neural\n            Networks for Volumetric Medical Image Segmentation\n            <https://arxiv.org/abs/1606.04797>`_.\n        - the dice loss in which the power of the number in the\n            denominator is the first power instead of the second\n            power.\n\n    Args:\n        pred (torch.Tensor): The prediction, has a shape (n, *)\n        target (torch.Tensor): The learning label of the prediction,\n            shape (n, *), same shape of pred.\n        weight (torch.Tensor, optional): The weight of loss for each\n            prediction, has a shape (n,). Defaults to None.\n        eps (float): Avoid dividing by zero. Default: 1e-3.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n            Options are \"none\", \"mean\" and \"sum\".\n        naive_dice (bool, optional): If false, use the dice\n                loss defined in the V-Net paper, otherwise, use the\n                naive dice loss in which the power of the number in the\n                denominator is the first power instead of the second\n                power.Defaults to False.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n\n    input = pred.flatten(1)\n    target = target.flatten(1).float()\n\n    a = torch.sum(input * target, 1)\n    if naive_dice:\n        b = torch.sum(input, 1)\n        c = torch.sum(target, 1)\n        d = (2 * a + eps) / (b + c + eps)\n    else:\n        b = torch.sum(input * input, 1) + eps\n        c = torch.sum(target * target, 1) + eps\n        d = (2 * a) / (b + c)\n\n    loss = 1 - d\n    if weight is not None:\n        assert weight.ndim == loss.ndim\n        assert len(weight) == len(pred)\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@MODELS.register_module()\nclass DiceLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 activate=True,\n                 reduction='mean',\n                 naive_dice=False,\n                 loss_weight=1.0,\n                 eps=1e-3):\n        \"\"\"Compute dice loss.\n\n        Args:\n            use_sigmoid (bool, optional): Whether to the prediction is\n                used for sigmoid or softmax. Defaults to True.\n            activate (bool): Whether to activate the predictions inside,\n                this will disable the inside sigmoid operation.\n                Defaults to True.\n            reduction (str, optional): The method used\n                to reduce the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to 'mean'.\n            naive_dice (bool, optional): If false, use the dice\n                loss defined in the V-Net paper, otherwise, use the\n                naive dice loss in which the power of the number in the\n                denominator is the first power instead of the second\n                power. 
Defaults to False.\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n            eps (float): Avoid dividing by zero. Defaults to 1e-3.\n        \"\"\"\n\n        super(DiceLoss, self).__init__()\n        self.use_sigmoid = use_sigmoid\n        self.reduction = reduction\n        self.naive_dice = naive_dice\n        self.loss_weight = loss_weight\n        self.eps = eps\n        self.activate = activate\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                reduction_override=None,\n                avg_factor=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction, has a shape (n, *).\n            target (torch.Tensor): The label of the prediction,\n                shape (n, *), same shape of pred.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction, has a shape (n,). Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n\n        if self.activate:\n            if self.use_sigmoid:\n                pred = pred.sigmoid()\n            else:\n                raise NotImplementedError\n\n        loss = self.loss_weight * dice_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            naive_dice=self.naive_dice,\n            avg_factor=avg_factor)\n\n        return loss\n"
  },
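A short sketch contrasting the two documented forms of the dice loss, assuming mmdet is importable; logits and targets are illustrative. The module applies the sigmoid internally because `activate` and `use_sigmoid` default to True.

import torch
from mmdet.models.losses import DiceLoss

pred = torch.tensor([[2.0, -2.0, 0.5, -0.5]])  # logits for one 4-pixel mask
target = torch.tensor([[1.0, 0.0, 1.0, 0.0]])

# V-Net form: squared terms in the denominator
print(DiceLoss(naive_dice=False)(pred, target))
# naive form: first-power terms in the denominator
print(DiceLoss(naive_dice=True)(pred, target))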
  {
    "path": "mmdet/models/losses/focal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss\n\n\n# This method is only for debugging\ndef py_sigmoid_focal_loss(pred,\n                          target,\n                          weight=None,\n                          gamma=2.0,\n                          alpha=0.25,\n                          reduction='mean',\n                          avg_factor=None):\n    \"\"\"PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the\n            number of classes\n        target (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    pred_sigmoid = pred.sigmoid()\n    target = target.type_as(pred)\n    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n    focal_weight = (alpha * target + (1 - alpha) *\n                    (1 - target)) * pt.pow(gamma)\n    loss = F.binary_cross_entropy_with_logits(\n        pred, target, reduction='none') * focal_weight\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\ndef py_focal_loss_with_prob(pred,\n                            target,\n                            weight=None,\n                            gamma=2.0,\n                            alpha=0.25,\n                            reduction='mean',\n                            avg_factor=None):\n    \"\"\"PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.\n    Different from `py_sigmoid_focal_loss`, this function accepts probability\n    as input.\n\n    Args:\n        pred (torch.Tensor): The prediction probability with shape (N, C),\n            C is the number of classes.\n        target (torch.Tensor): The learning label of the prediction.\n            The target shape support (N,C) or (N,), (N,C) means one-hot form.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. 
Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    if pred.dim() != target.dim():\n        num_classes = pred.size(1)\n        target = F.one_hot(target, num_classes=num_classes + 1)\n        target = target[:, :num_classes]\n\n    target = target.type_as(pred)\n    pt = (1 - pred) * target + pred * (1 - target)\n    focal_weight = (alpha * target + (1 - alpha) *\n                    (1 - target)) * pt.pow(gamma)\n    loss = F.binary_cross_entropy(\n        pred, target, reduction='none') * focal_weight\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\ndef sigmoid_focal_loss(pred,\n                       target,\n                       weight=None,\n                       gamma=2.0,\n                       alpha=0.25,\n                       reduction='mean',\n                       avg_factor=None):\n    r\"\"\"A wrapper of cuda version `Focal Loss\n    <https://arxiv.org/abs/1708.02002>`_.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the number\n            of classes.\n        target (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    # Function.apply does not accept keyword arguments, so the decorator\n    # \"weighted_loss\" is not applicable\n    loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,\n                               alpha, None, 'none')\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. 
But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@MODELS.register_module()\nclass FocalLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 gamma=2.0,\n                 alpha=0.25,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 activated=False):\n        \"\"\"`Focal Loss <https://arxiv.org/abs/1708.02002>`_\n\n        Args:\n            use_sigmoid (bool, optional): Whether to the prediction is\n                used for sigmoid or softmax. Defaults to True.\n            gamma (float, optional): The gamma for calculating the modulating\n                factor. Defaults to 2.0.\n            alpha (float, optional): A balanced form for Focal Loss.\n                Defaults to 0.25.\n            reduction (str, optional): The method used to reduce the loss into\n                a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and\n                \"sum\".\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n            activated (bool, optional): Whether the input is activated.\n                If True, it means the input has been activated and can be\n                treated as probabilities. Else, it should be treated as logits.\n                Defaults to False.\n        \"\"\"\n        super(FocalLoss, self).__init__()\n        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'\n        self.use_sigmoid = use_sigmoid\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.activated = activated\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning label of the prediction.\n                The target shape support (N,C) or (N,), (N,C) means\n                one-hot form.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            if self.activated:\n                calculate_loss_func = py_focal_loss_with_prob\n            else:\n                if pred.dim() == target.dim():\n                    # this means that target is already in One-Hot form.\n                    calculate_loss_func = py_sigmoid_focal_loss\n                elif torch.cuda.is_available() and pred.is_cuda:\n                    calculate_loss_func = sigmoid_focal_loss\n                else:\n                    num_classes = pred.size(1)\n                    target = F.one_hot(target, num_classes=num_classes + 1)\n                    target = target[:, :num_classes]\n                    calculate_loss_func = py_sigmoid_focal_loss\n\n            loss_cls = self.loss_weight * calculate_loss_func(\n                pred,\n                target,\n                weight,\n                gamma=self.gamma,\n                alpha=self.alpha,\n                reduction=reduction,\n                avg_factor=avg_factor)\n\n        else:\n            raise NotImplementedError\n        return loss_cls\n"
  },
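A quick numerical check of the modulating factor described above, using the pure-PyTorch debugging variant `py_sigmoid_focal_loss` defined in this file (assuming the repo is importable): with `gamma=0` the factor `pt**gamma` is 1 and `alpha=0.5` weights positives and negatives equally, so the loss reduces to half the binary cross-entropy.

import torch
import torch.nn.functional as F
from mmdet.models.losses.focal_loss import py_sigmoid_focal_loss

logits = torch.randn(8, 4)
onehot = F.one_hot(torch.randint(0, 4, (8,)), num_classes=4).float()

fl = py_sigmoid_focal_loss(logits, onehot, gamma=0.0, alpha=0.5)
bce = F.binary_cross_entropy_with_logits(logits, onehot)
assert torch.allclose(fl, 0.5 * bce)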
  {
    "path": "mmdet/models/losses/gaussian_focal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Union\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss, weighted_loss\n\n\n@weighted_loss\ndef gaussian_focal_loss(pred: Tensor,\n                        gaussian_target: Tensor,\n                        alpha: float = 2.0,\n                        gamma: float = 4.0,\n                        pos_weight: float = 1.0,\n                        neg_weight: float = 1.0) -> Tensor:\n    \"\"\"`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian\n    distribution.\n\n    Args:\n        pred (torch.Tensor): The prediction.\n        gaussian_target (torch.Tensor): The learning target of the prediction\n            in gaussian distribution.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 2.0.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 4.0.\n        pos_weight(float): Positive sample loss weight. Defaults to 1.0.\n        neg_weight(float): Negative sample loss weight. Defaults to 1.0.\n    \"\"\"\n    eps = 1e-12\n    pos_weights = gaussian_target.eq(1)\n    neg_weights = (1 - gaussian_target).pow(gamma)\n    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights\n    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights\n    return pos_weight * pos_loss + neg_weight * neg_loss\n\n\ndef gaussian_focal_loss_with_pos_inds(\n        pred: Tensor,\n        gaussian_target: Tensor,\n        pos_inds: Tensor,\n        pos_labels: Tensor,\n        alpha: float = 2.0,\n        gamma: float = 4.0,\n        pos_weight: float = 1.0,\n        neg_weight: float = 1.0,\n        reduction: str = 'mean',\n        avg_factor: Optional[Union[int, float]] = None) -> Tensor:\n    \"\"\"`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian\n    distribution.\n\n    Note: The index with a value of 1 in ``gaussian_target`` in the\n    ``gaussian_focal_loss`` function is a positive sample, but in\n    ``gaussian_focal_loss_with_pos_inds`` the positive sample is passed\n    in through the ``pos_inds`` parameter.\n\n    Args:\n        pred (torch.Tensor): The prediction. The shape is (N, num_classes).\n        gaussian_target (torch.Tensor): The learning target of the prediction\n            in gaussian distribution. The shape is (N, num_classes).\n        pos_inds (torch.Tensor): The positive sample index.\n            The shape is (M, ).\n        pos_labels (torch.Tensor): The label corresponding to the positive\n            sample index. The shape is (M, ).\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 2.0.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 4.0.\n        pos_weight(float): Positive sample loss weight. Defaults to 1.0.\n        neg_weight(float): Negative sample loss weight. Defaults to 1.0.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to 'mean`.\n        avg_factor (int, float, optional): Average factor that is used to\n            average the loss. 
Defaults to None.\n    \"\"\"\n    eps = 1e-12\n    neg_weights = (1 - gaussian_target).pow(gamma)\n\n    pos_pred_pix = pred[pos_inds]\n    pos_pred = pos_pred_pix.gather(1, pos_labels.unsqueeze(1))\n    pos_loss = -(pos_pred + eps).log() * (1 - pos_pred).pow(alpha)\n    pos_loss = weight_reduce_loss(pos_loss, None, reduction, avg_factor)\n\n    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights\n    neg_loss = weight_reduce_loss(neg_loss, None, reduction, avg_factor)\n\n    return pos_weight * pos_loss + neg_weight * neg_loss\n\n\n@MODELS.register_module()\nclass GaussianFocalLoss(nn.Module):\n    \"\"\"GaussianFocalLoss is a variant of focal loss.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1808.01244>`_\n    Code is modified from `kp_utils.py\n    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501\n    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,\n    not 0/1 binary target.\n\n    Args:\n        alpha (float): Power of prediction.\n        gamma (float): Power of target for negative samples.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Loss weight of current loss.\n        pos_weight(float): Positive sample loss weight. Defaults to 1.0.\n        neg_weight(float): Negative sample loss weight. Defaults to 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 alpha: float = 2.0,\n                 gamma: float = 4.0,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0,\n                 pos_weight: float = 1.0,\n                 neg_weight: float = 1.0) -> None:\n        super().__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.pos_weight = pos_weight\n        self.neg_weight = neg_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                pos_inds: Optional[Tensor] = None,\n                pos_labels: Optional[Tensor] = None,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[Union[int, float]] = None,\n                reduction_override: Optional[str] = None) -> Tensor:\n        \"\"\"Forward function.\n\n        If you want to manually determine which positions are\n        positive samples, you can set the pos_index and pos_label\n        parameter. Currently, only the CenterNet update version uses\n        the parameter.\n\n        Args:\n            pred (torch.Tensor): The prediction. The shape is (N, num_classes).\n            target (torch.Tensor): The learning target of the prediction\n                in gaussian distribution. The shape is (N, num_classes).\n            pos_inds (torch.Tensor): The positive sample index.\n                Defaults to None.\n            pos_labels (torch.Tensor): The label corresponding to the positive\n                sample index. Defaults to None.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, float, optional): Average factor that is used to\n                average the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if pos_inds is not None:\n            assert pos_labels is not None\n            # Only used by centernet update version\n            loss_reg = self.loss_weight * gaussian_focal_loss_with_pos_inds(\n                pred,\n                target,\n                pos_inds,\n                pos_labels,\n                alpha=self.alpha,\n                gamma=self.gamma,\n                pos_weight=self.pos_weight,\n                neg_weight=self.neg_weight,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            loss_reg = self.loss_weight * gaussian_focal_loss(\n                pred,\n                target,\n                weight,\n                alpha=self.alpha,\n                gamma=self.gamma,\n                pos_weight=self.pos_weight,\n                neg_weight=self.neg_weight,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        return loss_reg\n"
  },
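A minimal usage sketch for GaussianFocalLoss (not part of the repository files), assuming PyTorch is installed and that the class is importable from mmdet.models.losses as in upstream MMDetection. The prediction is an already-activated heatmap and the target is a Gaussian heatmap whose peaks (value 1) mark positive locations.

    import torch
    from mmdet.models.losses import GaussianFocalLoss  # assumed export path

    loss_fn = GaussianFocalLoss(alpha=2.0, gamma=4.0)
    pred = torch.randn(4, 80).sigmoid()   # activated heatmap values in (0, 1)
    target = torch.zeros_like(pred)
    target[0, 5] = 1.0                    # a single positive peak
    loss = loss_fn(pred, target)          # scalar, 'mean' reduction by default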
  {
    "path": "mmdet/models/losses/gfocal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.models.losses.utils import weighted_loss\nfrom mmdet.registry import MODELS\n\n\n@weighted_loss\ndef quality_focal_loss(pred, target, beta=2.0):\n    r\"\"\"Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted joint representation of classification\n            and quality (IoU) estimation with shape (N, C), C is the number of\n            classes.\n        target (tuple([torch.Tensor])): Target category label with shape (N,)\n            and target quality label with shape (N,).\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert len(target) == 2, \"\"\"target for QFL must be a tuple of two elements,\n        including category label and quality label, respectively\"\"\"\n    # label denotes the category id, score denotes the quality score\n    label, score = target\n\n    # negatives are supervised by 0 quality score\n    pred_sigmoid = pred.sigmoid()\n    scale_factor = pred_sigmoid\n    zerolabel = scale_factor.new_zeros(pred.shape)\n    loss = F.binary_cross_entropy_with_logits(\n        pred, zerolabel, reduction='none') * scale_factor.pow(beta)\n\n    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n    bg_class_ind = pred.size(1)\n    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)\n    pos_label = label[pos].long()\n    # positives are supervised by bbox quality (IoU) score\n    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]\n    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(\n        pred[pos, pos_label], score[pos],\n        reduction='none') * scale_factor.abs().pow(beta)\n\n    loss = loss.sum(dim=1, keepdim=False)\n    return loss\n\n\n@weighted_loss\ndef quality_focal_loss_tensor_target(pred, target, beta=2.0, activated=False):\n    \"\"\"`QualityFocal Loss <https://arxiv.org/abs/2008.13367>`_\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the\n            number of classes\n        target (torch.Tensor): The learning target of the iou-aware\n            classification score with shape (N, C), C is the number of classes.\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n        activated (bool): Whether the input is activated.\n            If True, it means the input has been activated and can be\n            treated as probabilities. 
Else, it should be treated as logits.\n            Defaults to False.\n    \"\"\"\n    # pred and target should be of the same size\n    assert pred.size() == target.size()\n    if activated:\n        pred_sigmoid = pred\n        loss_function = F.binary_cross_entropy\n    else:\n        pred_sigmoid = pred.sigmoid()\n        loss_function = F.binary_cross_entropy_with_logits\n\n    scale_factor = pred_sigmoid\n    target = target.type_as(pred)\n\n    zerolabel = scale_factor.new_zeros(pred.shape)\n    loss = loss_function(\n        pred, zerolabel, reduction='none') * scale_factor.pow(beta)\n\n    pos = (target != 0)\n    scale_factor = target[pos] - pred_sigmoid[pos]\n    loss[pos] = loss_function(\n        pred[pos], target[pos],\n        reduction='none') * scale_factor.abs().pow(beta)\n\n    loss = loss.sum(dim=1, keepdim=False)\n    return loss\n\n\n@weighted_loss\ndef quality_focal_loss_with_prob(pred, target, beta=2.0):\n    r\"\"\"Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n    Different from `quality_focal_loss`, this function accepts probability\n    as input.\n\n    Args:\n        pred (torch.Tensor): Predicted joint representation of classification\n            and quality (IoU) estimation with shape (N, C), C is the number of\n            classes.\n        target (tuple([torch.Tensor])): Target category label with shape (N,)\n            and target quality label with shape (N,).\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert len(target) == 2, \"\"\"target for QFL must be a tuple of two elements,\n        including category label and quality label, respectively\"\"\"\n    # label denotes the category id, score denotes the quality score\n    label, score = target\n\n    # negatives are supervised by 0 quality score\n    pred_sigmoid = pred\n    scale_factor = pred_sigmoid\n    zerolabel = scale_factor.new_zeros(pred.shape)\n    loss = F.binary_cross_entropy(\n        pred, zerolabel, reduction='none') * scale_factor.pow(beta)\n\n    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n    bg_class_ind = pred.size(1)\n    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)\n    pos_label = label[pos].long()\n    # positives are supervised by bbox quality (IoU) score\n    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]\n    loss[pos, pos_label] = F.binary_cross_entropy(\n        pred[pos, pos_label], score[pos],\n        reduction='none') * scale_factor.abs().pow(beta)\n\n    loss = loss.sum(dim=1, keepdim=False)\n    return loss\n\n\n@weighted_loss\ndef distribution_focal_loss(pred, label):\n    r\"\"\"Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted general distribution of bounding boxes\n            (before softmax) with shape (N, n+1), n is the max value of the\n            integral set `{0, ..., n}` in paper.\n        label (torch.Tensor): Target distance label for bounding boxes with\n            shape (N,).\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    dis_left = label.long()\n    dis_right = dis_left + 1\n    weight_left = 
dis_right.float() - label\n    weight_right = label - dis_left.float()\n    loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \\\n        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right\n    return loss\n\n\n@MODELS.register_module()\nclass QualityFocalLoss(nn.Module):\n    r\"\"\"Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:\n    Learning Qualified and Distributed Bounding Boxes for Dense Object\n    Detection <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.\n            Defaults to True.\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Loss weight of current loss.\n        activated (bool, optional): Whether the input is activated.\n            If True, it means the input has been activated and can be\n            treated as probabilities. Else, it should be treated as logits.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 beta=2.0,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 activated=False):\n        super(QualityFocalLoss, self).__init__()\n        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'\n        self.use_sigmoid = use_sigmoid\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.activated = activated\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): Predicted joint representation of\n                classification and quality (IoU) estimation with shape (N, C),\n                C is the number of classes.\n            target (Union[tuple([torch.Tensor]), torch.Tensor]): If the type\n                is a tuple, it should contain the target category label with\n                shape (N,) and the target quality label with shape (N,). If\n                the type is torch.Tensor, the target should be in one-hot\n                form with soft weights.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            if self.activated:\n                calculate_loss_func = quality_focal_loss_with_prob\n            else:\n                calculate_loss_func = quality_focal_loss\n            if isinstance(target, torch.Tensor):\n                # the target shape with (N,C) or (N,C,...), which means\n                # the target is one-hot form with soft weights.\n                calculate_loss_func = partial(\n                    quality_focal_loss_tensor_target, activated=self.activated)\n\n            loss_cls = self.loss_weight * calculate_loss_func(\n                pred,\n                target,\n                weight,\n                beta=self.beta,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            raise NotImplementedError\n        return loss_cls\n\n\n@MODELS.register_module()\nclass DistributionFocalLoss(nn.Module):\n    r\"\"\"Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:\n    Learning Qualified and Distributed Bounding Boxes for Dense Object\n    Detection <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.\n        loss_weight (float): Loss weight of current loss.\n    \"\"\"\n\n    def __init__(self, reduction='mean', loss_weight=1.0):\n        super(DistributionFocalLoss, self).__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): Predicted general distribution of bounding\n                boxes (before softmax) with shape (N, n+1), n is the max value\n                of the integral set `{0, ..., n}` in paper.\n            target (torch.Tensor): Target distance label for bounding boxes\n                with shape (N,).\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_cls = self.loss_weight * distribution_focal_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss_cls\n"
  },
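A minimal usage sketch for QualityFocalLoss and DistributionFocalLoss (not part of the repository files), assuming PyTorch is installed and that both classes are importable from mmdet.models.losses as in upstream MMDetection. QFL takes a (label, quality score) tuple as target; DFL takes a continuous distance label for a discretized regression distribution.

    import torch
    from mmdet.models.losses import DistributionFocalLoss, QualityFocalLoss  # assumed export path

    qfl = QualityFocalLoss(use_sigmoid=True, beta=2.0)
    cls_logits = torch.randn(6, 80)                  # (N, num_classes) logits
    labels = torch.tensor([2, 80, 80, 7, 80, 11])    # 80 == background index
    scores = torch.rand(6)                           # IoU quality targets for positives
    loss_qfl = qfl(cls_logits, (labels, scores))

    dfl = DistributionFocalLoss()
    reg_logits = torch.randn(6, 17)                  # n + 1 = 17 bins (before softmax)
    reg_labels = torch.rand(6) * 16                  # continuous targets in [0, 16)
    loss_dfl = dfl(reg_logits, reg_labels)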
  {
    "path": "mmdet/models/losses/ghm_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels):\n    bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n    inds = torch.nonzero(\n        (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()\n    if inds.numel() > 0:\n        bin_labels[inds, labels[inds]] = 1\n    bin_label_weights = label_weights.view(-1, 1).expand(\n        label_weights.size(0), label_channels)\n    return bin_labels, bin_label_weights\n\n\n# TODO: code refactoring to make it consistent with other losses\n@MODELS.register_module()\nclass GHMC(nn.Module):\n    \"\"\"GHM Classification Loss.\n\n    Details of the theorem can be viewed in the paper\n    `Gradient Harmonized Single-stage Detector\n    <https://arxiv.org/abs/1811.05181>`_.\n\n    Args:\n        bins (int): Number of the unit regions for distribution calculation.\n        momentum (float): The parameter for moving average.\n        use_sigmoid (bool): Can only be true for BCE based loss now.\n        loss_weight (float): The weight of the total GHM-C loss.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to \"mean\"\n    \"\"\"\n\n    def __init__(self,\n                 bins=10,\n                 momentum=0,\n                 use_sigmoid=True,\n                 loss_weight=1.0,\n                 reduction='mean'):\n        super(GHMC, self).__init__()\n        self.bins = bins\n        self.momentum = momentum\n        edges = torch.arange(bins + 1).float() / bins\n        self.register_buffer('edges', edges)\n        self.edges[-1] += 1e-6\n        if momentum > 0:\n            acc_sum = torch.zeros(bins)\n            self.register_buffer('acc_sum', acc_sum)\n        self.use_sigmoid = use_sigmoid\n        if not self.use_sigmoid:\n            raise NotImplementedError\n        self.loss_weight = loss_weight\n        self.reduction = reduction\n\n    def forward(self,\n                pred,\n                target,\n                label_weight,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Calculate the GHM-C loss.\n\n        Args:\n            pred (float tensor of size [batch_num, class_num]):\n                The direct prediction of classification fc layer.\n            target (float tensor of size [batch_num, class_num]):\n                Binary class target for each sample.\n            label_weight (float tensor of size [batch_num, class_num]):\n                the value is 1 if the sample is valid and 0 if ignored.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        Returns:\n            The gradient harmonized loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        # the target should be binary class label\n        if pred.dim() != target.dim():\n            target, label_weight = _expand_onehot_labels(\n                target, label_weight, pred.size(-1))\n        target, label_weight = target.float(), label_weight.float()\n        edges = self.edges\n        mmt = self.momentum\n        weights = torch.zeros_like(pred)\n\n       
 # gradient length\n        g = torch.abs(pred.sigmoid().detach() - target)\n\n        valid = label_weight > 0\n        tot = max(valid.float().sum().item(), 1.0)\n        n = 0  # n valid bins\n        for i in range(self.bins):\n            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid\n            num_in_bin = inds.sum().item()\n            if num_in_bin > 0:\n                if mmt > 0:\n                    self.acc_sum[i] = mmt * self.acc_sum[i] \\\n                        + (1 - mmt) * num_in_bin\n                    weights[inds] = tot / self.acc_sum[i]\n                else:\n                    weights[inds] = tot / num_in_bin\n                n += 1\n        if n > 0:\n            weights = weights / n\n\n        loss = F.binary_cross_entropy_with_logits(\n            pred, target, reduction='none')\n        loss = weight_reduce_loss(\n            loss, weights, reduction=reduction, avg_factor=tot)\n        return loss * self.loss_weight\n\n\n# TODO: code refactoring to make it consistent with other losses\n@MODELS.register_module()\nclass GHMR(nn.Module):\n    \"\"\"GHM Regression Loss.\n\n    Details of the theorem can be viewed in the paper\n    `Gradient Harmonized Single-stage Detector\n    <https://arxiv.org/abs/1811.05181>`_.\n\n    Args:\n        mu (float): The parameter for the Authentic Smooth L1 loss.\n        bins (int): Number of the unit regions for distribution calculation.\n        momentum (float): The parameter for moving average.\n        loss_weight (float): The weight of the total GHM-R loss.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to \"mean\"\n    \"\"\"\n\n    def __init__(self,\n                 mu=0.02,\n                 bins=10,\n                 momentum=0,\n                 loss_weight=1.0,\n                 reduction='mean'):\n        super(GHMR, self).__init__()\n        self.mu = mu\n        self.bins = bins\n        edges = torch.arange(bins + 1).float() / bins\n        self.register_buffer('edges', edges)\n        self.edges[-1] = 1e3\n        self.momentum = momentum\n        if momentum > 0:\n            acc_sum = torch.zeros(bins)\n            self.register_buffer('acc_sum', acc_sum)\n        self.loss_weight = loss_weight\n        self.reduction = reduction\n\n    # TODO: support reduction parameter\n    def forward(self,\n                pred,\n                target,\n                label_weight,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Calculate the GHM-R loss.\n\n        Args:\n            pred (float tensor of size [batch_num, 4 (* class_num)]):\n                The prediction of box regression layer. 
Channel number can be 4\n                or 4 * class_num depending on whether it is class-agnostic.\n            target (float tensor of size [batch_num, 4 (* class_num)]):\n                The target regression values with the same size of pred.\n            label_weight (float tensor of size [batch_num, 4 (* class_num)]):\n                The weight of each sample, 0 if ignored.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        Returns:\n            The gradient harmonized loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        mu = self.mu\n        edges = self.edges\n        mmt = self.momentum\n\n        # ASL1 loss\n        diff = pred - target\n        loss = torch.sqrt(diff * diff + mu * mu) - mu\n\n        # gradient length\n        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()\n        weights = torch.zeros_like(g)\n\n        valid = label_weight > 0\n        tot = max(label_weight.float().sum().item(), 1.0)\n        n = 0  # n: valid bins\n        for i in range(self.bins):\n            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid\n            num_in_bin = inds.sum().item()\n            if num_in_bin > 0:\n                n += 1\n                if mmt > 0:\n                    self.acc_sum[i] = mmt * self.acc_sum[i] \\\n                        + (1 - mmt) * num_in_bin\n                    weights[inds] = tot / self.acc_sum[i]\n                else:\n                    weights[inds] = tot / num_in_bin\n        if n > 0:\n            weights /= n\n        loss = weight_reduce_loss(\n            loss, weights, reduction=reduction, avg_factor=tot)\n        return loss * self.loss_weight\n"
  },
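A minimal usage sketch for the GHM classification loss above (not part of the repository files), assuming PyTorch is installed and that GHMC is importable from mmdet.models.losses as in upstream MMDetection. Here label_weight marks valid samples (1) versus ignored ones (0).

    import torch
    from mmdet.models.losses import GHMC  # assumed export path

    ghmc = GHMC(bins=10, momentum=0.75, use_sigmoid=True)
    logits = torch.randn(8, 20)             # raw classification logits
    labels = torch.randint(0, 20, (8,))     # integer targets, expanded to one-hot internally
    label_weight = torch.ones(8)            # all samples valid
    loss = ghmc(logits, labels, label_weight)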
  {
    "path": "mmdet/models/losses/iou_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom .utils import weighted_loss\n\n\n@weighted_loss\ndef iou_loss(pred: Tensor,\n             target: Tensor,\n             linear: bool = False,\n             mode: str = 'log',\n             eps: float = 1e-6) -> Tensor:\n    \"\"\"IoU loss.\n\n    Computing the IoU loss between a set of predicted bboxes and target bboxes.\n    The loss is calculated as negative log of IoU.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        linear (bool, optional): If True, use linear scale of loss instead of\n            log scale. Default: False.\n        mode (str): Loss scaling mode, including \"linear\", \"square\", and \"log\".\n            Default: 'log'\n        eps (float): Epsilon to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    assert mode in ['linear', 'square', 'log']\n    if linear:\n        mode = 'linear'\n        warnings.warn('DeprecationWarning: Setting \"linear=True\" in '\n                      'iou_loss is deprecated, please use \"mode=`linear`\" '\n                      'instead.')\n    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)\n    if mode == 'linear':\n        loss = 1 - ious\n    elif mode == 'square':\n        loss = 1 - ious**2\n    elif mode == 'log':\n        loss = -ious.log()\n    else:\n        raise NotImplementedError\n    return loss\n\n\n@weighted_loss\ndef bounded_iou_loss(pred: Tensor,\n                     target: Tensor,\n                     beta: float = 0.2,\n                     eps: float = 1e-3) -> Tensor:\n    \"\"\"BIoULoss.\n\n    This is an implementation of paper\n    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.\n    <https://arxiv.org/abs/1711.00164>`_.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        beta (float, optional): Beta parameter in smoothl1.\n        eps (float, optional): Epsilon to avoid NaN values.\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5\n    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5\n    pred_w = pred[:, 2] - pred[:, 0]\n    pred_h = pred[:, 3] - pred[:, 1]\n    with torch.no_grad():\n        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5\n        target_ctry = (target[:, 1] + target[:, 3]) * 0.5\n        target_w = target[:, 2] - target[:, 0]\n        target_h = target[:, 3] - target[:, 1]\n\n    dx = target_ctrx - pred_ctrx\n    dy = target_ctry - pred_ctry\n\n    loss_dx = 1 - torch.max(\n        (target_w - 2 * dx.abs()) /\n        (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))\n    loss_dy = 1 - torch.max(\n        (target_h - 2 * dy.abs()) /\n        (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))\n    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /\n                            (target_w + eps))\n    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /\n                            (target_h + eps))\n    # view(..., -1) does not work for empty tensor\n    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],\n   
                         dim=-1).flatten(1)\n\n    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,\n                       loss_comb - 0.5 * beta)\n    return loss\n\n\n@weighted_loss\ndef giou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:\n    r\"\"\"`Generalized Intersection over Union: A Metric and A Loss for Bounding\n    Box Regression <https://arxiv.org/abs/1902.09630>`_.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Epsilon to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)\n    loss = 1 - gious\n    return loss\n\n\n@weighted_loss\ndef diou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:\n    r\"\"\"Implementation of `Distance-IoU Loss: Faster and Better\n    Learning for Bounding Box Regression https://arxiv.org/abs/1911.08287`_.\n\n    Code is modified from https://github.com/Zzh-tju/DIoU.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Epsilon to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    # DIoU\n    dious = ious - rho2 / c2\n    loss = 1 - dious\n    return loss\n\n\n@weighted_loss\ndef ciou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:\n    r\"\"\"`Implementation of paper `Enhancing Geometric Factors into\n    Model Learning and Inference for Object Detection and Instance\n    Segmentation <https://arxiv.org/abs/2005.03572>`_.\n\n    Code is modified from https://github.com/Zzh-tju/CIoU.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Epsilon to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # 
enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi**2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    with torch.no_grad():\n        alpha = (ious > 0.5).float() * v / (1 - ious + v)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + alpha * v)\n    loss = 1 - cious.clamp(min=-1.0, max=1.0)\n    return loss\n\n\n@weighted_loss\ndef eiou_loss(pred: Tensor,\n              target: Tensor,\n              smooth_point: float = 0.1,\n              eps: float = 1e-7) -> Tensor:\n    r\"\"\"Implementation of paper `Extended-IoU Loss: A Systematic\n    IoU-Related Method: Beyond Simplified Regression for Better\n    Localization <https://ieeexplore.ieee.org/abstract/document/9429909>`_\n\n    Code is modified from https://github.com//ShiqiYu/libfacedetection.train.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        smooth_point (float): hyperparameter, default is 0.1.\n        eps (float): Epsilon to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]\n    tx1, ty1, tx2, ty2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3]\n\n    # extent top left\n    ex1 = torch.min(px1, tx1)\n    ey1 = torch.min(py1, ty1)\n\n    # intersection coordinates\n    ix1 = torch.max(px1, tx1)\n    iy1 = torch.max(py1, ty1)\n    ix2 = torch.min(px2, tx2)\n    iy2 = torch.min(py2, ty2)\n\n    # extra\n    xmin = torch.min(ix1, ix2)\n    ymin = torch.min(iy1, iy2)\n    xmax = torch.max(ix1, ix2)\n    ymax = torch.max(iy1, iy2)\n\n    # Intersection\n    intersection = (ix2 - ex1) * (iy2 - ey1) + (xmin - ex1) * (ymin - ey1) - (\n        ix1 - ex1) * (ymax - ey1) - (xmax - ex1) * (\n            iy1 - ey1)\n    # Union\n    union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * (\n        ty2 - ty1) - intersection + eps\n    # IoU\n    ious = 1 - (intersection / union)\n\n    # Smooth-EIoU\n    smooth_sign = (ious < smooth_point).detach().float()\n    loss = 0.5 * smooth_sign * (ious**2) / smooth_point + (1 - smooth_sign) * (\n        ious - 0.5 * smooth_point)\n    return loss\n\n\n@MODELS.register_module()\nclass IoULoss(nn.Module):\n    \"\"\"IoULoss.\n\n    Computing the IoU loss between a set of predicted bboxes and target bboxes.\n\n    Args:\n        linear (bool): If True, use linear scale of loss else determined\n            by mode. 
Default: False.\n        eps (float): Epsilon to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n        mode (str): Loss scaling mode, including \"linear\", \"square\", and \"log\".\n            Default: 'log'\n    \"\"\"\n\n    def __init__(self,\n                 linear: bool = False,\n                 eps: float = 1e-6,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0,\n                 mode: str = 'log') -> None:\n        super().__init__()\n        assert mode in ['linear', 'square', 'log']\n        if linear:\n            mode = 'linear'\n            warnings.warn('DeprecationWarning: Setting \"linear=True\" in '\n                          'IOULoss is deprecated, please use \"mode=`linear`\" '\n                          'instead.')\n        self.mode = mode\n        self.linear = linear\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Return:\n            Tensor: Loss tensor.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if (weight is not None) and (not torch.any(weight > 0)) and (\n                reduction != 'none'):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # iou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * iou_loss(\n            pred,\n            target,\n            weight,\n            mode=self.mode,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@MODELS.register_module()\nclass BoundedIoULoss(nn.Module):\n    \"\"\"BIoULoss.\n\n    This is an implementation of paper\n    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.\n    <https://arxiv.org/abs/1711.00164>`_.\n\n    Args:\n        beta (float, optional): Beta parameter in smoothl1.\n        eps (float, optional): Epsilon to avoid NaN values.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 beta: float = 0.2,\n                 eps: float = 1e-3,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.beta = beta\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n            reduction_override (Optional[str], optional): The reduction method\n                used to override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss = self.loss_weight * bounded_iou_loss(\n            pred,\n            target,\n            weight,\n            beta=self.beta,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@MODELS.register_module()\nclass GIoULoss(nn.Module):\n    r\"\"\"`Generalized Intersection over Union: A Metric and A Loss for Bounding\n    Box Regression <https://arxiv.org/abs/1902.09630>`_.\n\n    Args:\n        eps (float): Epsilon to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 eps: float = 1e-6,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n            reduction_override (Optional[str], optional): The reduction method\n                used to override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * giou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@MODELS.register_module()\nclass DIoULoss(nn.Module):\n    r\"\"\"Implementation of `Distance-IoU Loss: Faster and Better\n    Learning for Bounding Box Regression https://arxiv.org/abs/1911.08287`_.\n\n    Code is modified from https://github.com/Zzh-tju/DIoU.\n\n    Args:\n        eps (float): Epsilon to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 eps: float = 1e-6,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n            reduction_override (Optional[str], optional): The reduction method\n                used to override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * diou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@MODELS.register_module()\nclass CIoULoss(nn.Module):\n    r\"\"\"`Implementation of paper `Enhancing Geometric Factors into\n    Model Learning and Inference for Object Detection and Instance\n    Segmentation <https://arxiv.org/abs/2005.03572>`_.\n\n    Code is modified from https://github.com/Zzh-tju/CIoU.\n\n    Args:\n        eps (float): Epsilon to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 eps: float = 1e-6,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n            reduction_override (Optional[str], optional): The reduction method\n                used to override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * ciou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@MODELS.register_module()\nclass EIoULoss(nn.Module):\n    r\"\"\"Implementation of paper `Extended-IoU Loss: A Systematic\n    IoU-Related Method: Beyond Simplified Regression for Better\n    Localization <https://ieeexplore.ieee.org/abstract/document/9429909>`_\n\n    Code is modified from https://github.com//ShiqiYu/libfacedetection.train.\n\n    Args:\n        eps (float): Epsilon to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n        smooth_point (float): hyperparameter, default is 0.1.\n    \"\"\"\n\n    def __init__(self,\n                 eps: float = 1e-6,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0,\n                 smooth_point: float = 0.1) -> None:\n        super().__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.smooth_point = smooth_point\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n                shape (n, 4).\n            target (Tensor): The learning target of the prediction,\n                shape (n, 4).\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n            reduction_override (Optional[str], optional): The reduction method\n                used to override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * eiou_loss(\n            pred,\n            target,\n            weight,\n            smooth_point=self.smooth_point,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n"
  },
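A minimal usage sketch for the IoU-based losses above (not part of the repository files), assuming PyTorch is installed and that the classes are importable from mmdet.models.losses as in upstream MMDetection. Boxes are in (x1, y1, x2, y2) format and pred/target are aligned row by row.

    import torch
    from mmdet.models.losses import DIoULoss, GIoULoss  # assumed export path

    pred = torch.tensor([[10., 10., 50., 50.], [20., 20., 60., 80.]])
    target = torch.tensor([[12., 8., 48., 55.], [25., 15., 70., 85.]])

    loss_giou = GIoULoss()(pred, target)   # scalar, 'mean' reduction by default
    loss_diou = DIoULoss()(pred, target)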
  {
    "path": "mmdet/models/losses/kd_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss, weighted_loss\n\n\n@weighted_loss\ndef knowledge_distillation_kl_div_loss(pred,\n                                       soft_label,\n                                       T,\n                                       class_reduction='mean',\n                                       detach_target=True):\n    r\"\"\"Loss function for knowledge distilling using KL divergence.\n\n    Args:\n        pred (Tensor): Predicted logits with shape (N, n + 1).\n        soft_label (Tensor): Target logits with shape (N, N + 1).\n        T (int): Temperature for distillation.\n        detach_target (bool): Remove soft_label from automatic differentiation\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert pred.size() == soft_label.size()\n    target = F.softmax(soft_label / T, dim=1)\n    if detach_target:\n        target = target.detach()\n\n    kd_loss = F.kl_div(\n        F.log_softmax(pred / T, dim=1), target, reduction='none')\n    if class_reduction == 'mean':\n        kd_loss = kd_loss.mean(1)\n    elif class_reduction == 'sum':\n        kd_loss = kd_loss.sum(1)\n    else:\n        raise NotImplementedError\n    kd_loss = kd_loss * (T * T)\n    return kd_loss\n\n\ndef kd_quality_focal_loss(pred,\n                          target,\n                          weight=None,\n                          beta=1,\n                          reduction='mean',\n                          avg_factor=None):\n    num_classes = pred.size(1)\n    if weight is not None:\n        weight = weight[:, None].repeat(1, num_classes)\n\n    target = target.detach().sigmoid()\n    loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')\n    focal_weight = torch.abs(pred.sigmoid() - target).pow(beta)\n    loss = loss * focal_weight\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@MODELS.register_module()\nclass KnowledgeDistillationKLDivLoss(nn.Module):\n    \"\"\"Loss function for knowledge distilling using KL divergence.\n\n    Args:\n        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.\n        loss_weight (float): Loss weight of current loss.\n        T (int): Temperature for distillation.\n    \"\"\"\n\n    def __init__(self,\n                 class_reduction='mean',\n                 reduction='mean',\n                 loss_weight=1.0,\n                 T=10):\n        super(KnowledgeDistillationKLDivLoss, self).__init__()\n        assert T >= 1\n        self.class_reduction = class_reduction\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.T = T\n\n    def forward(self,\n                pred,\n                soft_label,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted logits with shape (N, n + 1).\n            soft_label (Tensor): Target logits with shape (N, N + 1).\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n\n        loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(\n            pred,\n            soft_label,\n            weight,\n            class_reduction=self.class_reduction,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            T=self.T)\n\n        return loss_kd\n\n\n@MODELS.register_module()\nclass KDQualityFocalLoss(nn.Module):\n    \"\"\"Quality Focal Loss variant for knowledge distillation, where the\n    target is the teacher's logits rather than a (label, quality score)\n    tuple.\n\n    Args:\n        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.\n            Can only be True. Defaults to True.\n        beta (float): The beta parameter for calculating the modulating\n            factor. Defaults to 1.0.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to 'mean'.\n        loss_weight (float): Loss weight of current loss. Defaults to 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 beta=1.0,\n                 reduction='mean',\n                 loss_weight=1.0):\n        super(KDQualityFocalLoss, self).__init__()\n        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'\n        self.use_sigmoid = use_sigmoid\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): Predicted joint representation of\n                classification and quality (IoU) estimation with shape (N, C),\n                C is the number of classes.\n            target (torch.Tensor): Target logits from the teacher model with\n                the same shape as ``pred``.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            loss = self.loss_weight * kd_quality_focal_loss(\n                pred,\n                target,\n                weight,\n                beta=self.beta,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            raise NotImplementedError\n        return loss\n"
  },
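A minimal, self-contained sketch of the temperature-scaled KL-divergence used by the `kd_loss.py` entry above, re-derived in plain PyTorch on toy tensors (the helper name `kd_kl_div` and the tensor shapes are illustrative assumptions, not part of the repository):

```python
import torch
import torch.nn.functional as F


def kd_kl_div(pred, soft_label, T=10.0):
    # Soften the teacher logits with temperature T and detach them from autograd.
    target = F.softmax(soft_label / T, dim=1).detach()
    # Element-wise KL(target || student) on the temperature-scaled student logits.
    kd = F.kl_div(F.log_softmax(pred / T, dim=1), target, reduction='none')
    # Average over classes and rescale by T^2 so gradient magnitudes stay comparable.
    return kd.mean(1) * (T * T)


pred = torch.randn(4, 81)        # student logits, (N, num_classes)
soft_label = torch.randn(4, 81)  # teacher logits, same shape
print(kd_kl_div(pred, soft_label).shape)  # torch.Size([4]), one loss value per sample
```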
  {
    "path": "mmdet/models/losses/mse_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .utils import weighted_loss\n\n\n@weighted_loss\ndef mse_loss(pred: Tensor, target: Tensor) -> Tensor:\n    \"\"\"A Wrapper of MSE loss.\n    Args:\n        pred (Tensor): The prediction.\n        target (Tensor): The learning target of the prediction.\n\n    Returns:\n        Tensor: loss Tensor\n    \"\"\"\n    return F.mse_loss(pred, target, reduction='none')\n\n\n@MODELS.register_module()\nclass MSELoss(nn.Module):\n    \"\"\"MSELoss.\n\n    Args:\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n    \"\"\"\n\n    def __init__(self,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None) -> Tensor:\n        \"\"\"Forward function of loss.\n\n        Args:\n            pred (Tensor): The prediction.\n            target (Tensor): The learning target of the prediction.\n            weight (Tensor, optional): Weight of the loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n\n        Returns:\n            Tensor: The calculated loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss = self.loss_weight * mse_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss\n"
  },
  {
    "path": "mmdet/models/losses/pisa_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import bbox_overlaps\nfrom ..task_modules.coders import BaseBBoxCoder\nfrom ..task_modules.samplers import SamplingResult\n\n\ndef isr_p(cls_score: Tensor,\n          bbox_pred: Tensor,\n          bbox_targets: Tuple[Tensor],\n          rois: Tensor,\n          sampling_results: List[SamplingResult],\n          loss_cls: nn.Module,\n          bbox_coder: BaseBBoxCoder,\n          k: float = 2,\n          bias: float = 0,\n          num_class: int = 80) -> tuple:\n    \"\"\"Importance-based Sample Reweighting (ISR_P), positive part.\n\n    Args:\n        cls_score (Tensor): Predicted classification scores.\n        bbox_pred (Tensor): Predicted bbox deltas.\n        bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are\n            labels, label_weights, bbox_targets, bbox_weights, respectively.\n        rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs\n            (two_stage) in shape (n, 5).\n        sampling_results (:obj:`SamplingResult`): Sampling results.\n        loss_cls (:obj:`nn.Module`): Classification loss func of the head.\n        bbox_coder (:obj:`BaseBBoxCoder`): BBox coder of the head.\n        k (float): Power of the non-linear mapping. Defaults to 2.\n        bias (float): Shift of the non-linear mapping. Defaults to 0.\n        num_class (int): Number of classes, defaults to 80.\n\n    Return:\n        tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,\n            bbox_target_weights\n    \"\"\"\n\n    labels, label_weights, bbox_targets, bbox_weights = bbox_targets\n    pos_label_inds = ((labels >= 0) &\n                      (labels < num_class)).nonzero().reshape(-1)\n    pos_labels = labels[pos_label_inds]\n\n    # if no positive samples, return the original targets\n    num_pos = float(pos_label_inds.size(0))\n    if num_pos == 0:\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    # merge pos_assigned_gt_inds of per image to a single tensor\n    gts = list()\n    last_max_gt = 0\n    for i in range(len(sampling_results)):\n        gt_i = sampling_results[i].pos_assigned_gt_inds\n        gts.append(gt_i + last_max_gt)\n        if len(gt_i) != 0:\n            last_max_gt = gt_i.max() + 1\n    gts = torch.cat(gts)\n    assert len(gts) == num_pos\n\n    cls_score = cls_score.detach()\n    bbox_pred = bbox_pred.detach()\n\n    # For single stage detectors, rois here indicate anchors, in shape (N, 4)\n    # For two stage detectors, rois are in shape (N, 5)\n    if rois.size(-1) == 5:\n        pos_rois = rois[pos_label_inds][:, 1:]\n    else:\n        pos_rois = rois[pos_label_inds]\n\n    if bbox_pred.size(-1) > 4:\n        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)\n        pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)\n    else:\n        pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)\n\n    # compute iou of the predicted bbox and the corresponding GT\n    pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)\n    pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)\n    target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)\n    ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)\n\n    pos_imp_weights = label_weights[pos_label_inds]\n    # Two steps to compute IoU-HLR. 
Samples are first sorted by IoU locally,\n    # then sorted again within the same-rank group\n    max_l_num = pos_labels.bincount().max()\n    for label in pos_labels.unique():\n        l_inds = (pos_labels == label).nonzero().view(-1)\n        l_gts = gts[l_inds]\n        for t in l_gts.unique():\n            t_inds = l_inds[l_gts == t]\n            t_ious = ious[t_inds]\n            _, t_iou_rank_idx = t_ious.sort(descending=True)\n            _, t_iou_rank = t_iou_rank_idx.sort()\n            ious[t_inds] += max_l_num - t_iou_rank.float()\n        l_ious = ious[l_inds]\n        _, l_iou_rank_idx = l_ious.sort(descending=True)\n        _, l_iou_rank = l_iou_rank_idx.sort()  # IoU-HLR\n        # linearly map HLR to label weights\n        pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num\n\n    pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)\n\n    # normalize to make the new weighted loss value equal to the original loss\n    pos_loss_cls = loss_cls(\n        cls_score[pos_label_inds], pos_labels, reduction_override='none')\n    if pos_loss_cls.dim() > 1:\n        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,\n                                                                        None]\n        new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]\n    else:\n        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]\n        new_pos_loss_cls = pos_loss_cls * pos_imp_weights\n    pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()\n    pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio\n    label_weights[pos_label_inds] = pos_imp_weights\n\n    bbox_targets = labels, label_weights, bbox_targets, bbox_weights\n    return bbox_targets\n\n\ndef carl_loss(cls_score: Tensor,\n              labels: Tensor,\n              bbox_pred: Tensor,\n              bbox_targets: Tensor,\n              loss_bbox: nn.Module,\n              k: float = 1,\n              bias: float = 0.2,\n              avg_factor: Optional[int] = None,\n              sigmoid: bool = False,\n              num_class: int = 80) -> dict:\n    \"\"\"Classification-Aware Regression Loss (CARL).\n\n    Args:\n        cls_score (Tensor): Predicted classification scores.\n        labels (Tensor): Targets of classification.\n        bbox_pred (Tensor): Predicted bbox deltas.\n        bbox_targets (Tensor): Target of bbox regression.\n        loss_bbox (func): Regression loss func of the head.\n        bbox_coder (obj): BBox coder of the head.\n        k (float): Power of the non-linear mapping. Defaults to 1.\n        bias (float): Shift of the non-linear mapping. 
Defaults to 0.2.\n        avg_factor (int, optional): Average factor used in regression loss.\n        sigmoid (bool): Activation of the classification score.\n        num_class (int): Number of classes, defaults to 80.\n\n    Return:\n        dict: CARL loss dict.\n    \"\"\"\n    pos_label_inds = ((labels >= 0) &\n                      (labels < num_class)).nonzero().reshape(-1)\n    if pos_label_inds.numel() == 0:\n        return dict(loss_carl=cls_score.sum()[None] * 0.)\n    pos_labels = labels[pos_label_inds]\n\n    # multiply pos_cls_score with the corresponding bbox weight\n    # and remain gradient\n    if sigmoid:\n        pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]\n    else:\n        pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]\n    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)\n\n    # normalize carl_loss_weight to make its sum equal to num positive\n    num_pos = float(pos_cls_score.size(0))\n    weight_ratio = num_pos / carl_loss_weights.sum()\n    carl_loss_weights *= weight_ratio\n\n    if avg_factor is None:\n        avg_factor = bbox_targets.size(0)\n    # if is class agnostic, bbox pred is in shape (N, 4)\n    # otherwise, bbox pred is in shape (N, #classes, 4)\n    if bbox_pred.size(-1) > 4:\n        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)\n        pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]\n    else:\n        pos_bbox_preds = bbox_pred[pos_label_inds]\n    ori_loss_reg = loss_bbox(\n        pos_bbox_preds,\n        bbox_targets[pos_label_inds],\n        reduction_override='none') / avg_factor\n    loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()\n    return dict(loss_carl=loss_carl[None])\n"
  },
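A toy sketch of the CARL weighting scheme from `pisa_loss.py` above: positive-sample classification scores are passed through a non-linear mapping and renormalized so the weights sum to the number of positives. The score values and the `k`/`bias` settings here are illustrative assumptions:

```python
import torch

# Classification scores of three hypothetical positive samples.
pos_cls_score = torch.tensor([0.9, 0.5, 0.1])
k, bias = 1.0, 0.2

# Non-linear mapping (bias + (1 - bias) * score) ** k, then renormalize so the
# weighted regression loss keeps the same overall scale as the unweighted one.
w = (bias + (1 - bias) * pos_cls_score).pow(k)
w = w * (pos_cls_score.numel() / w.sum())
print(w)  # higher-scored positives receive larger regression weights
```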
  {
    "path": "mmdet/models/losses/pkd_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.registry import MODELS\nfrom .utils import weighted_loss\n\n\ndef norm(feat: torch.Tensor) -> torch.Tensor:\n    \"\"\"Normalize the feature maps to have zero mean and unit variances.\n\n    Args:\n        feat (torch.Tensor): The original feature map with shape\n            (N, C, H, W).\n    \"\"\"\n    assert len(feat.shape) == 4\n    N, C, H, W = feat.shape\n    feat = feat.permute(1, 0, 2, 3).reshape(C, -1)\n    mean = feat.mean(dim=-1, keepdim=True)\n    std = feat.std(dim=-1, keepdim=True)\n    feat = (feat - mean) / (std + 1e-6)\n    return feat.reshape(C, N, H, W).permute(1, 0, 2, 3)\n\n\n@weighted_loss\ndef pkd_loss(pred, target):\n    pred = norm(pred)\n    target = norm(target)\n    return F.mse_loss(pred, target, reduction='none') / 2\n\n\n@MODELS.register_module()\nclass PKDLoss(nn.Module):\n\n    def __init__(self, reduction='mean', loss_weight=1.0):\n        super(PKDLoss, self).__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None) -> torch.Tensor:\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss = self.loss_weight * pkd_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss\n"
  },
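A small stand-alone sketch of the per-channel feature normalization that `pkd_loss.py` applies before the MSE term, assuming toy student/teacher feature maps (the helper name `channelwise_norm` is not from the repository):

```python
import torch
import torch.nn.functional as F


def channelwise_norm(feat, eps=1e-6):
    # Normalize each channel of an (N, C, H, W) map to zero mean / unit variance,
    # with statistics computed over the batch and spatial dimensions.
    n, c, h, w = feat.shape
    flat = feat.permute(1, 0, 2, 3).reshape(c, -1)
    flat = (flat - flat.mean(-1, keepdim=True)) / (flat.std(-1, keepdim=True) + eps)
    return flat.reshape(c, n, h, w).permute(1, 0, 2, 3)


student = torch.randn(2, 8, 16, 16)
teacher = torch.randn(2, 8, 16, 16)
loss = F.mse_loss(channelwise_norm(student), channelwise_norm(teacher), reduction='none') / 2
print(loss.mean())
```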
  {
    "path": "mmdet/models/losses/seesaw_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .accuracy import accuracy\nfrom .cross_entropy_loss import cross_entropy\nfrom .utils import weight_reduce_loss\n\n\ndef seesaw_ce_loss(cls_score: Tensor,\n                   labels: Tensor,\n                   label_weights: Tensor,\n                   cum_samples: Tensor,\n                   num_classes: int,\n                   p: float,\n                   q: float,\n                   eps: float,\n                   reduction: str = 'mean',\n                   avg_factor: Optional[int] = None) -> Tensor:\n    \"\"\"Calculate the Seesaw CrossEntropy loss.\n\n    Args:\n        cls_score (Tensor): The prediction with shape (N, C),\n             C is the number of classes.\n        labels (Tensor): The learning label of the prediction.\n        label_weights (Tensor): Sample-wise loss weight.\n        cum_samples (Tensor): Cumulative samples for each category.\n        num_classes (int): The number of classes.\n        p (float): The ``p`` in the mitigation factor.\n        q (float): The ``q`` in the compenstation factor.\n        eps (float): The minimal value of divisor to smooth\n             the computation of compensation factor\n        reduction (str, optional): The method used to reduce the loss.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n\n    Returns:\n        Tensor: The calculated loss\n    \"\"\"\n    assert cls_score.size(-1) == num_classes\n    assert len(cum_samples) == num_classes\n\n    onehot_labels = F.one_hot(labels, num_classes)\n    seesaw_weights = cls_score.new_ones(onehot_labels.size())\n\n    # mitigation factor\n    if p > 0:\n        sample_ratio_matrix = cum_samples[None, :].clamp(\n            min=1) / cum_samples[:, None].clamp(min=1)\n        index = (sample_ratio_matrix < 1.0).float()\n        sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)\n        mitigation_factor = sample_weights[labels.long(), :]\n        seesaw_weights = seesaw_weights * mitigation_factor\n\n    # compensation factor\n    if q > 0:\n        scores = F.softmax(cls_score.detach(), dim=1)\n        self_scores = scores[\n            torch.arange(0, len(scores)).to(scores.device).long(),\n            labels.long()]\n        score_matrix = scores / self_scores[:, None].clamp(min=eps)\n        index = (score_matrix > 1.0).float()\n        compensation_factor = score_matrix.pow(q) * index + (1 - index)\n        seesaw_weights = seesaw_weights * compensation_factor\n\n    cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))\n\n    loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')\n\n    if label_weights is not None:\n        label_weights = label_weights.float()\n    loss = weight_reduce_loss(\n        loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)\n    return loss\n\n\n@MODELS.register_module()\nclass SeesawLoss(nn.Module):\n    \"\"\"\n    Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)\n    arXiv: https://arxiv.org/abs/2008.10032\n\n    Args:\n        use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n             of softmax. 
Only False is supported.\n        p (float, optional): The ``p`` in the mitigation factor.\n             Defaults to 0.8.\n        q (float, optional): The ``q`` in the compenstation factor.\n             Defaults to 2.0.\n        num_classes (int, optional): The number of classes.\n             Default to 1203 for LVIS v1 dataset.\n        eps (float, optional): The minimal value of divisor to smooth\n             the computation of compensation factor\n        reduction (str, optional): The method that reduces the loss to a\n             scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n        return_dict (bool, optional): Whether return the losses as a dict.\n             Default to True.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid: bool = False,\n                 p: float = 0.8,\n                 q: float = 2.0,\n                 num_classes: int = 1203,\n                 eps: float = 1e-2,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0,\n                 return_dict: bool = True) -> None:\n        super().__init__()\n        assert not use_sigmoid\n        self.use_sigmoid = False\n        self.p = p\n        self.q = q\n        self.num_classes = num_classes\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.return_dict = return_dict\n\n        # 0 for pos, 1 for neg\n        self.cls_criterion = seesaw_ce_loss\n\n        # cumulative samples for each category\n        self.register_buffer(\n            'cum_samples',\n            torch.zeros(self.num_classes + 1, dtype=torch.float))\n\n        # custom output channels of the classifier\n        self.custom_cls_channels = True\n        # custom activation of cls_score\n        self.custom_activation = True\n        # custom accuracy of the classsifier\n        self.custom_accuracy = True\n\n    def _split_cls_score(self, cls_score: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"split cls_score.\n\n        Args:\n            cls_score (Tensor): The prediction with shape (N, C + 2).\n\n        Returns:\n            Tuple[Tensor, Tensor]: The score for classes and objectness,\n                 respectively\n        \"\"\"\n        # split cls_score to cls_score_classes and cls_score_objectness\n        assert cls_score.size(-1) == self.num_classes + 2\n        cls_score_classes = cls_score[..., :-2]\n        cls_score_objectness = cls_score[..., -2:]\n        return cls_score_classes, cls_score_objectness\n\n    def get_cls_channels(self, num_classes: int) -> int:\n        \"\"\"Get custom classification channels.\n\n        Args:\n            num_classes (int): The number of classes.\n\n        Returns:\n            int: The custom classification channels.\n        \"\"\"\n        assert num_classes == self.num_classes\n        return num_classes + 2\n\n    def get_activation(self, cls_score: Tensor) -> Tensor:\n        \"\"\"Get custom activation of cls_score.\n\n        Args:\n            cls_score (Tensor): The prediction with shape (N, C + 2).\n\n        Returns:\n            Tensor: The custom activation of cls_score with shape\n                 (N, C + 1).\n        \"\"\"\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        score_classes = F.softmax(cls_score_classes, dim=-1)\n        score_objectness = F.softmax(cls_score_objectness, dim=-1)\n        score_pos = 
score_objectness[..., [0]]\n        score_neg = score_objectness[..., [1]]\n        score_classes = score_classes * score_pos\n        scores = torch.cat([score_classes, score_neg], dim=-1)\n        return scores\n\n    def get_accuracy(self, cls_score: Tensor,\n                     labels: Tensor) -> Dict[str, Tensor]:\n        \"\"\"Get custom accuracy w.r.t. cls_score and labels.\n\n        Args:\n            cls_score (Tensor): The prediction with shape (N, C + 2).\n            labels (Tensor): The learning label of the prediction.\n\n        Returns:\n            Dict [str, Tensor]: The accuracy for objectness and classes,\n                 respectively.\n        \"\"\"\n        pos_inds = labels < self.num_classes\n        obj_labels = (labels == self.num_classes).long()\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        acc_objectness = accuracy(cls_score_objectness, obj_labels)\n        acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])\n        acc = dict()\n        acc['acc_objectness'] = acc_objectness\n        acc['acc_classes'] = acc_classes\n        return acc\n\n    def forward(\n        self,\n        cls_score: Tensor,\n        labels: Tensor,\n        label_weights: Optional[Tensor] = None,\n        avg_factor: Optional[int] = None,\n        reduction_override: Optional[str] = None\n    ) -> Union[Tensor, Dict[str, Tensor]]:\n        \"\"\"Forward function.\n\n        Args:\n            cls_score (Tensor): The prediction with shape (N, C + 2).\n            labels (Tensor): The learning label of the prediction.\n            label_weights (Tensor, optional): Sample-wise loss weight.\n            avg_factor (int, optional): Average factor that is used to average\n                 the loss. 
Defaults to None.\n            reduction (str, optional): The method used to reduce the loss.\n                 Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor | Dict [str, Tensor]:\n                 if return_dict == False: The calculated loss |\n                 if return_dict == True: The dict of calculated losses\n                 for objectness and classes, respectively.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        assert cls_score.size(-1) == self.num_classes + 2\n        pos_inds = labels < self.num_classes\n        # 0 for pos, 1 for neg\n        obj_labels = (labels == self.num_classes).long()\n\n        # accumulate the samples for each category\n        unique_labels = labels.unique()\n        for u_l in unique_labels:\n            inds_ = labels == u_l.item()\n            self.cum_samples[u_l] += inds_.sum()\n\n        if label_weights is not None:\n            label_weights = label_weights.float()\n        else:\n            label_weights = labels.new_ones(labels.size(), dtype=torch.float)\n\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        # calculate loss_cls_classes (only need pos samples)\n        if pos_inds.sum() > 0:\n            loss_cls_classes = self.loss_weight * self.cls_criterion(\n                cls_score_classes[pos_inds], labels[pos_inds],\n                label_weights[pos_inds], self.cum_samples[:self.num_classes],\n                self.num_classes, self.p, self.q, self.eps, reduction,\n                avg_factor)\n        else:\n            loss_cls_classes = cls_score_classes[pos_inds].sum()\n        # calculate loss_cls_objectness\n        loss_cls_objectness = self.loss_weight * cross_entropy(\n            cls_score_objectness, obj_labels, label_weights, reduction,\n            avg_factor)\n\n        if self.return_dict:\n            loss_cls = dict()\n            loss_cls['loss_cls_objectness'] = loss_cls_objectness\n            loss_cls['loss_cls_classes'] = loss_cls_classes\n        else:\n            loss_cls = loss_cls_classes + loss_cls_objectness\n        return loss_cls\n"
  },
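A toy illustration of the Seesaw mitigation factor computed in `seesaw_loss.py` above: for a class pair (i, j) the weight is `(N_j / N_i) ** p` when class j has accumulated fewer samples than class i, and 1 otherwise. The sample counts below are made up for demonstration:

```python
import torch

cum_samples = torch.tensor([100., 10., 1.])  # hypothetical per-class sample counts
p = 0.8

ratio = cum_samples[None, :].clamp(min=1) / cum_samples[:, None].clamp(min=1)
index = (ratio < 1.0).float()
mitigation = ratio.pow(p) * index + (1 - index)
print(mitigation)  # rows: ground-truth class i, columns: competing class j
```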
  {
    "path": "mmdet/models/losses/smooth_l1_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .utils import weighted_loss\n\n\n@weighted_loss\ndef smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:\n    \"\"\"Smooth L1 loss.\n\n    Args:\n        pred (Tensor): The prediction.\n        target (Tensor): The learning target of the prediction.\n        beta (float, optional): The threshold in the piecewise function.\n            Defaults to 1.0.\n\n    Returns:\n        Tensor: Calculated loss\n    \"\"\"\n    assert beta > 0\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n    diff = torch.abs(pred - target)\n    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,\n                       diff - 0.5 * beta)\n    return loss\n\n\n@weighted_loss\ndef l1_loss(pred: Tensor, target: Tensor) -> Tensor:\n    \"\"\"L1 loss.\n\n    Args:\n        pred (Tensor): The prediction.\n        target (Tensor): The learning target of the prediction.\n\n    Returns:\n        Tensor: Calculated loss\n    \"\"\"\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n    loss = torch.abs(pred - target)\n    return loss\n\n\n@MODELS.register_module()\nclass SmoothL1Loss(nn.Module):\n    \"\"\"Smooth L1 loss.\n\n    Args:\n        beta (float, optional): The threshold in the piecewise function.\n            Defaults to 1.0.\n        reduction (str, optional): The method to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\". Defaults to \"mean\".\n        loss_weight (float, optional): The weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 beta: float = 1.0,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None,\n                **kwargs) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): The prediction.\n            target (Tensor): The learning target of the prediction.\n            weight (Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n\n        Returns:\n            Tensor: Calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * smooth_l1_loss(\n            pred,\n            target,\n            weight,\n            beta=self.beta,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss_bbox\n\n\n@MODELS.register_module()\nclass L1Loss(nn.Module):\n    \"\"\"L1 loss.\n\n    Args:\n        reduction (str, optional): The method to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of loss.\n    \"\"\"\n\n    def __init__(self,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        super().__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): The prediction.\n            target (Tensor): The learning target of the prediction.\n            weight (Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n\n        Returns:\n            Tensor: Calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * l1_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss_bbox\n"
  },
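A quick numeric check of the piecewise Smooth L1 definition from `smooth_l1_loss.py` above, quadratic below `beta` and linear (minus a constant) above it; the sample values are illustrative:

```python
import torch


def smooth_l1(pred, target, beta=1.0):
    # 0.5 * d^2 / beta for |d| < beta, otherwise |d| - 0.5 * beta.
    diff = (pred - target).abs()
    return torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)


print(smooth_l1(torch.tensor([0.2, 3.0]), torch.tensor([0.0, 0.0])))
# tensor([0.0200, 2.5000])
```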
  {
    "path": "mmdet/models/losses/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport functools\nfrom typing import Callable, Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\ndef reduce_loss(loss: Tensor, reduction: str) -> Tensor:\n    \"\"\"Reduce loss as specified.\n\n    Args:\n        loss (Tensor): Elementwise loss tensor.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n\n    Return:\n        Tensor: Reduced loss tensor.\n    \"\"\"\n    reduction_enum = F._Reduction.get_enum(reduction)\n    # none: 0, elementwise_mean:1, sum: 2\n    if reduction_enum == 0:\n        return loss\n    elif reduction_enum == 1:\n        return loss.mean()\n    elif reduction_enum == 2:\n        return loss.sum()\n\n\ndef weight_reduce_loss(loss: Tensor,\n                       weight: Optional[Tensor] = None,\n                       reduction: str = 'mean',\n                       avg_factor: Optional[float] = None) -> Tensor:\n    \"\"\"Apply element-wise weight and reduce loss.\n\n    Args:\n        loss (Tensor): Element-wise loss.\n        weight (Optional[Tensor], optional): Element-wise weights.\n            Defaults to None.\n        reduction (str, optional): Same as built-in losses of PyTorch.\n            Defaults to 'mean'.\n        avg_factor (Optional[float], optional): Average factor when\n            computing the mean of losses. Defaults to None.\n\n    Returns:\n        Tensor: Processed loss values.\n    \"\"\"\n    # if weight is specified, apply element-wise weight\n    if weight is not None:\n        loss = loss * weight\n\n    # if avg_factor is not specified, just reduce the loss\n    if avg_factor is None:\n        loss = reduce_loss(loss, reduction)\n    else:\n        # if reduction is mean, then average the loss by avg_factor\n        if reduction == 'mean':\n            # Avoid causing ZeroDivisionError when avg_factor is 0.0,\n            # i.e., all labels of an image belong to ignore index.\n            eps = torch.finfo(torch.float32).eps\n            loss = loss.sum() / (avg_factor + eps)\n        # if reduction is 'none', then do nothing, otherwise raise an error\n        elif reduction != 'none':\n            raise ValueError('avg_factor can not be used with reduction=\"sum\"')\n    return loss\n\n\ndef weighted_loss(loss_func: Callable) -> Callable:\n    \"\"\"Create a weighted version of a given loss function.\n\n    To use this decorator, the loss function must have the signature like\n    `loss_func(pred, target, **kwargs)`. The function only needs to compute\n    element-wise loss without any reduction. This decorator will add weight\n    and reduction arguments to the function. 
The decorated function will have\n    the signature like `loss_func(pred, target, weight=None, reduction='mean',\n    avg_factor=None, **kwargs)`.\n\n    :Example:\n\n    >>> import torch\n    >>> @weighted_loss\n    >>> def l1_loss(pred, target):\n    >>>     return (pred - target).abs()\n\n    >>> pred = torch.Tensor([0, 2, 3])\n    >>> target = torch.Tensor([1, 1, 1])\n    >>> weight = torch.Tensor([1, 0, 1])\n\n    >>> l1_loss(pred, target)\n    tensor(1.3333)\n    >>> l1_loss(pred, target, weight)\n    tensor(1.)\n    >>> l1_loss(pred, target, reduction='none')\n    tensor([1., 1., 2.])\n    >>> l1_loss(pred, target, weight, avg_factor=2)\n    tensor(1.5000)\n    \"\"\"\n\n    @functools.wraps(loss_func)\n    def wrapper(pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                reduction: str = 'mean',\n                avg_factor: Optional[int] = None,\n                **kwargs) -> Tensor:\n        \"\"\"\n        Args:\n            pred (Tensor): The prediction.\n            target (Tensor): Target bboxes.\n            weight (Optional[Tensor], optional): The weight of loss for each\n                prediction. Defaults to None.\n            reduction (str, optional): Options are \"none\", \"mean\" and \"sum\".\n                Defaults to 'mean'.\n            avg_factor (Optional[int], optional): Average factor that is used\n                to average the loss. Defaults to None.\n\n        Returns:\n            Tensor: Loss tensor.\n        \"\"\"\n        # get element-wise loss\n        loss = loss_func(pred, target, **kwargs)\n        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n        return loss\n\n    return wrapper\n"
  },
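In addition to the doctest already present in `utils.py`, this is a minimal sketch of the `avg_factor` branch of the weight-reduce logic: with `reduction='mean'` and an explicit `avg_factor`, the weighted loss is summed and divided by `avg_factor` (plus a small epsilon). The tensors below are toy values:

```python
import torch

loss = torch.tensor([1., 2., 3., 4.])
weight = torch.tensor([1., 1., 0., 0.])
avg_factor = 2
eps = torch.finfo(torch.float32).eps

print((loss * weight).sum() / (avg_factor + eps))  # ~1.5
```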
  {
    "path": "mmdet/models/losses/varifocal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .utils import weight_reduce_loss\n\n\ndef varifocal_loss(pred: Tensor,\n                   target: Tensor,\n                   weight: Optional[Tensor] = None,\n                   alpha: float = 0.75,\n                   gamma: float = 2.0,\n                   iou_weighted: bool = True,\n                   reduction: str = 'mean',\n                   avg_factor: Optional[int] = None) -> Tensor:\n    \"\"\"`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n    Args:\n        pred (Tensor): The prediction with shape (N, C), C is the\n            number of classes.\n        target (Tensor): The learning target of the iou-aware\n            classification score with shape (N, C), C is the number of classes.\n        weight (Tensor, optional): The weight of loss for each\n            prediction. Defaults to None.\n        alpha (float, optional): A balance factor for the negative part of\n            Varifocal Loss, which is different from the alpha of Focal Loss.\n            Defaults to 0.75.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        iou_weighted (bool, optional): Whether to weight the loss of the\n            positive example with the iou target. Defaults to True.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and\n            \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n\n    Returns:\n        Tensor: Loss tensor.\n    \"\"\"\n    # pred and target should be of the same size\n    assert pred.size() == target.size()\n    pred_sigmoid = pred.sigmoid()\n    target = target.type_as(pred)\n    if iou_weighted:\n        focal_weight = target * (target > 0.0).float() + \\\n            alpha * (pred_sigmoid - target).abs().pow(gamma) * \\\n            (target <= 0.0).float()\n    else:\n        focal_weight = (target > 0.0).float() + \\\n            alpha * (pred_sigmoid - target).abs().pow(gamma) * \\\n            (target <= 0.0).float()\n    loss = F.binary_cross_entropy_with_logits(\n        pred, target, reduction='none') * focal_weight\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@MODELS.register_module()\nclass VarifocalLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid: bool = True,\n                 alpha: float = 0.75,\n                 gamma: float = 2.0,\n                 iou_weighted: bool = True,\n                 reduction: str = 'mean',\n                 loss_weight: float = 1.0) -> None:\n        \"\"\"`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction is\n                used for sigmoid or softmax. Defaults to True.\n            alpha (float, optional): A balance factor for the negative part of\n                Varifocal Loss, which is different from the alpha of Focal\n                Loss. Defaults to 0.75.\n            gamma (float, optional): The gamma for calculating the modulating\n                factor. 
Defaults to 2.0.\n            iou_weighted (bool, optional): Whether to weight the loss of the\n                positive examples with the iou target. Defaults to True.\n            reduction (str, optional): The method used to reduce the loss into\n                a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and\n                \"sum\".\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n        \"\"\"\n        super().__init__()\n        assert use_sigmoid is True, \\\n            'Only sigmoid varifocal loss supported now.'\n        assert alpha >= 0.0\n        self.use_sigmoid = use_sigmoid\n        self.alpha = alpha\n        self.gamma = gamma\n        self.iou_weighted = iou_weighted\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred: Tensor,\n                target: Tensor,\n                weight: Optional[Tensor] = None,\n                avg_factor: Optional[int] = None,\n                reduction_override: Optional[str] = None) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): The prediction with shape (N, C), C is the\n                number of classes.\n            target (Tensor): The learning target of the iou-aware\n                classification score with shape (N, C), C is\n                the number of classes.\n            weight (Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            loss_cls = self.loss_weight * varifocal_loss(\n                pred,\n                target,\n                weight,\n                alpha=self.alpha,\n                gamma=self.gamma,\n                iou_weighted=self.iou_weighted,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            raise NotImplementedError\n        return loss_cls\n"
  },
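A compact re-derivation of the Varifocal focal weight from `varifocal_loss.py` above: positives (IoU-aware targets > 0) are weighted by the target itself, while negatives are down-weighted by `alpha * |sigmoid(pred) - target| ** gamma`. The helper name and tensor shapes are assumptions for illustration:

```python
import torch
import torch.nn.functional as F


def varifocal(pred, target, alpha=0.75, gamma=2.0):
    p = pred.sigmoid()
    focal_weight = target * (target > 0).float() + \
        alpha * (p - target).abs().pow(gamma) * (target <= 0).float()
    return F.binary_cross_entropy_with_logits(pred, target, reduction='none') * focal_weight


pred = torch.randn(4, 20)
target = torch.zeros(4, 20)
target[0, 3] = 0.7  # one positive with an IoU-aware score of 0.7
print(varifocal(pred, target).sum())
```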
  {
    "path": "mmdet/models/necks/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bfp import BFP\nfrom .channel_mapper import ChannelMapper\nfrom .cspnext_pafpn import CSPNeXtPAFPN\nfrom .ct_resnet_neck import CTResNetNeck\nfrom .dilated_encoder import DilatedEncoder\nfrom .dyhead import DyHead\nfrom .fpg import FPG\nfrom .fpn import FPN\nfrom .fpn_carafe import FPN_CARAFE\nfrom .hrfpn import HRFPN\nfrom .nas_fpn import NASFPN\nfrom .nasfcos_fpn import NASFCOS_FPN\nfrom .pafpn import PAFPN\nfrom .rfp import RFP\nfrom .ssd_neck import SSDNeck\nfrom .ssh import SSH\nfrom .yolo_neck import YOLOV3Neck\nfrom .yolox_pafpn import YOLOXPAFPN\n\n__all__ = [\n    'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',\n    'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',\n    'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH'\n]\n"
  },
  {
    "path": "mmdet/models/necks/bfp.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.cnn.bricks import NonLocal2d\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass BFP(BaseModule):\n    \"\"\"BFP (Balanced Feature Pyramids)\n\n    BFP takes multi-level features as inputs and gather them into a single one,\n    then refine the gathered feature and scatter the refined results to\n    multi-level features. This module is used in Libra R-CNN (CVPR 2019), see\n    the paper `Libra R-CNN: Towards Balanced Learning for Object Detection\n    <https://arxiv.org/abs/1904.02701>`_ for details.\n\n    Args:\n        in_channels (int): Number of input channels (feature maps of all levels\n            should have the same channels).\n        num_levels (int): Number of input feature levels.\n        refine_level (int): Index of integration and refine level of BSF in\n            multi-level features from bottom to top.\n        refine_type (str): Type of the refine op, currently support\n            [None, 'conv', 'non_local'].\n        conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict for\n            convolution layers.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): The config dict for\n            normalization layers.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        num_levels: int,\n        refine_level: int = 2,\n        refine_type: str = None,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        init_cfg: OptMultiConfig = dict(\n            type='Xavier', layer='Conv2d', distribution='uniform')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert refine_type in [None, 'conv', 'non_local']\n\n        self.in_channels = in_channels\n        self.num_levels = num_levels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.refine_level = refine_level\n        self.refine_type = refine_type\n        assert 0 <= self.refine_level < self.num_levels\n\n        if self.refine_type == 'conv':\n            self.refine = ConvModule(\n                self.in_channels,\n                self.in_channels,\n                3,\n                padding=1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n        elif self.refine_type == 'non_local':\n            self.refine = NonLocal2d(\n                self.in_channels,\n                reduction=1,\n                use_scale=False,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n\n    def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == self.num_levels\n\n        # step 1: gather multi-level features by resize and average\n        feats = []\n        gather_size = inputs[self.refine_level].size()[2:]\n        for i in range(self.num_levels):\n            if i < self.refine_level:\n                gathered = F.adaptive_max_pool2d(\n                    inputs[i], output_size=gather_size)\n            else:\n                gathered = F.interpolate(\n                    inputs[i], size=gather_size, mode='nearest')\n    
        feats.append(gathered)\n\n        bsf = sum(feats) / len(feats)\n\n        # step 2: refine gathered features\n        if self.refine_type is not None:\n            bsf = self.refine(bsf)\n\n        # step 3: scatter refined features to multi-levels by a residual path\n        outs = []\n        for i in range(self.num_levels):\n            out_size = inputs[i].size()[2:]\n            if i < self.refine_level:\n                residual = F.interpolate(bsf, size=out_size, mode='nearest')\n            else:\n                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)\n            outs.append(residual + inputs[i])\n\n        return tuple(outs)\n"
  },
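A minimal sketch of BFP's gather step (the part of `bfp.py` above that balances the pyramid): levels below the refine level are max-pooled down, levels above are upsampled, and the resized maps are averaged into one feature. The feature sizes and `refine_level` below are toy assumptions:

```python
import torch
import torch.nn.functional as F

feats = [torch.randn(1, 8, s, s) for s in (64, 32, 16, 8)]  # a 4-level toy pyramid
refine_level = 2
gather_size = feats[refine_level].shape[2:]

gathered = [
    F.adaptive_max_pool2d(f, output_size=gather_size) if i < refine_level
    else F.interpolate(f, size=gather_size, mode='nearest')
    for i, f in enumerate(feats)
]
bsf = sum(gathered) / len(gathered)  # the balanced semantic feature
print(bsf.shape)  # torch.Size([1, 8, 16, 16])
```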
  {
    "path": "mmdet/models/necks/channel_mapper.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass ChannelMapper(BaseModule):\n    \"\"\"Channel Mapper to reduce/increase channels of backbone features.\n\n    This is used to reduce/increase channels of backbone features.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale).\n        kernel_size (int, optional): kernel_size for reducing channels (used\n            at each scale). Default: 3.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Default: None.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            normalization layer. Default: None.\n        act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            activation layer in ConvModule. Default: dict(type='ReLU').\n        num_outs (int, optional): Number of output feature maps. There would\n            be extra_convs when num_outs larger than the length of in_channels.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],\n            optional): Initialization config dict.\n    Example:\n        >>> import torch\n        >>> in_channels = [2, 3, 5, 7]\n        >>> scales = [340, 170, 84, 43]\n        >>> inputs = [torch.rand(1, c, s, s)\n        ...           for c, s in zip(in_channels, scales)]\n        >>> self = ChannelMapper(in_channels, 11, 3).eval()\n        >>> outputs = self.forward(inputs)\n        >>> for i in range(len(outputs)):\n        ...     
print(f'outputs[{i}].shape = {outputs[i].shape}')\n        outputs[0].shape = torch.Size([1, 11, 340, 340])\n        outputs[1].shape = torch.Size([1, 11, 170, 170])\n        outputs[2].shape = torch.Size([1, 11, 84, 84])\n        outputs[3].shape = torch.Size([1, 11, 43, 43])\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: List[int],\n        out_channels: int,\n        kernel_size: int = 3,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        act_cfg: OptConfigType = dict(type='ReLU'),\n        num_outs: int = None,\n        init_cfg: OptMultiConfig = dict(\n            type='Xavier', layer='Conv2d', distribution='uniform')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert isinstance(in_channels, list)\n        self.extra_convs = None\n        if num_outs is None:\n            num_outs = len(in_channels)\n        self.convs = nn.ModuleList()\n        for in_channel in in_channels:\n            self.convs.append(\n                ConvModule(\n                    in_channel,\n                    out_channels,\n                    kernel_size,\n                    padding=(kernel_size - 1) // 2,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n        if num_outs > len(in_channels):\n            self.extra_convs = nn.ModuleList()\n            for i in range(len(in_channels), num_outs):\n                if i == len(in_channels):\n                    in_channel = in_channels[-1]\n                else:\n                    in_channel = out_channels\n                self.extra_convs.append(\n                    ConvModule(\n                        in_channel,\n                        out_channels,\n                        3,\n                        stride=2,\n                        padding=1,\n                        conv_cfg=conv_cfg,\n                        norm_cfg=norm_cfg,\n                        act_cfg=act_cfg))\n\n    def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.convs)\n        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]\n        if self.extra_convs:\n            for i in range(len(self.extra_convs)):\n                if i == 0:\n                    outs.append(self.extra_convs[0](inputs[-1]))\n                else:\n                    outs.append(self.extra_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/cspnext_pafpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptMultiConfig\nfrom ..layers import CSPLayer\n\n\n@MODELS.register_module()\nclass CSPNeXtPAFPN(BaseModule):\n    \"\"\"Path Aggregation Network with CSPNeXt blocks.\n\n    Args:\n        in_channels (Sequence[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_csp_blocks (int): Number of bottlenecks in CSPLayer.\n            Defaults to 3.\n        use_depthwise (bool): Whether to use depthwise separable convolution in\n            blocks. Defaults to False.\n        expand_ratio (float): Ratio to adjust the number of channels of the\n            hidden layer. Default: 0.5\n        upsample_cfg (dict): Config dict for interpolate layer.\n            Default: `dict(scale_factor=2, mode='nearest')`\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN')\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish')\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: Sequence[int],\n        out_channels: int,\n        num_csp_blocks: int = 3,\n        use_depthwise: bool = False,\n        expand_ratio: float = 0.5,\n        upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'),\n        conv_cfg: bool = None,\n        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),\n        act_cfg: ConfigType = dict(type='Swish'),\n        init_cfg: OptMultiConfig = dict(\n            type='Kaiming',\n            layer='Conv2d',\n            a=math.sqrt(5),\n            distribution='uniform',\n            mode='fan_in',\n            nonlinearity='leaky_relu')\n    ) -> None:\n        super().__init__(init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n\n        # build top-down blocks\n        self.upsample = nn.Upsample(**upsample_cfg)\n        self.reduce_layers = nn.ModuleList()\n        self.top_down_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1, 0, -1):\n            self.reduce_layers.append(\n                ConvModule(\n                    in_channels[idx],\n                    in_channels[idx - 1],\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.top_down_blocks.append(\n                CSPLayer(\n                    in_channels[idx - 1] * 2,\n                    in_channels[idx - 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    use_cspnext_block=True,\n                    expand_ratio=expand_ratio,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        # build bottom-up blocks\n 
       self.downsamples = nn.ModuleList()\n        self.bottom_up_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1):\n            self.downsamples.append(\n                conv(\n                    in_channels[idx],\n                    in_channels[idx],\n                    3,\n                    stride=2,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.bottom_up_blocks.append(\n                CSPLayer(\n                    in_channels[idx] * 2,\n                    in_channels[idx + 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    use_cspnext_block=True,\n                    expand_ratio=expand_ratio,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        self.out_convs = nn.ModuleList()\n        for i in range(len(in_channels)):\n            self.out_convs.append(\n                conv(\n                    in_channels[i],\n                    out_channels,\n                    3,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n    def forward(self, inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:\n        \"\"\"\n        Args:\n            inputs (tuple[Tensor]): input features.\n\n        Returns:\n            tuple[Tensor]: YOLOXPAFPN features.\n        \"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # top-down path\n        inner_outs = [inputs[-1]]\n        for idx in range(len(self.in_channels) - 1, 0, -1):\n            feat_heigh = inner_outs[0]\n            feat_low = inputs[idx - 1]\n            feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx](\n                feat_heigh)\n            inner_outs[0] = feat_heigh\n\n            upsample_feat = self.upsample(feat_heigh)\n\n            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](\n                torch.cat([upsample_feat, feat_low], 1))\n            inner_outs.insert(0, inner_out)\n\n        # bottom-up path\n        outs = [inner_outs[0]]\n        for idx in range(len(self.in_channels) - 1):\n            feat_low = outs[-1]\n            feat_height = inner_outs[idx + 1]\n            downsample_feat = self.downsamples[idx](feat_low)\n            out = self.bottom_up_blocks[idx](\n                torch.cat([downsample_feat, feat_height], 1))\n            outs.append(out)\n\n        # out convs\n        for idx, conv in enumerate(self.out_convs):\n            outs[idx] = conv(outs[idx])\n\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/ct_resnet_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptMultiConfig\n\n\n@MODELS.register_module()\nclass CTResNetNeck(BaseModule):\n    \"\"\"The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for\n    object classification and box regression.\n\n    Args:\n         in_channels (int): Number of input channels.\n         num_deconv_filters (tuple[int]): Number of filters per stage.\n         num_deconv_kernels (tuple[int]): Number of kernels per stage.\n         use_dcn (bool): If True, use DCNv2. Defaults to True.\n         init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n             list[:obj:`ConfigDict`], optional): Initialization\n             config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 num_deconv_filters: Tuple[int, ...],\n                 num_deconv_kernels: Tuple[int, ...],\n                 use_dcn: bool = True,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert len(num_deconv_filters) == len(num_deconv_kernels)\n        self.fp16_enabled = False\n        self.use_dcn = use_dcn\n        self.in_channels = in_channels\n        self.deconv_layers = self._make_deconv_layer(num_deconv_filters,\n                                                     num_deconv_kernels)\n\n    def _make_deconv_layer(\n            self, num_deconv_filters: Tuple[int, ...],\n            num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:\n        \"\"\"use deconv layers to upsample backbone's output.\"\"\"\n        layers = []\n        for i in range(len(num_deconv_filters)):\n            feat_channels = num_deconv_filters[i]\n            conv_module = ConvModule(\n                self.in_channels,\n                feat_channels,\n                3,\n                padding=1,\n                conv_cfg=dict(type='DCNv2') if self.use_dcn else None,\n                norm_cfg=dict(type='BN'))\n            layers.append(conv_module)\n            upsample_module = ConvModule(\n                feat_channels,\n                feat_channels,\n                num_deconv_kernels[i],\n                stride=2,\n                padding=1,\n                conv_cfg=dict(type='deconv'),\n                norm_cfg=dict(type='BN'))\n            layers.append(upsample_module)\n            self.in_channels = feat_channels\n\n        return nn.Sequential(*layers)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the parameters.\"\"\"\n        for m in self.modules():\n            if isinstance(m, nn.ConvTranspose2d):\n                # In order to be consistent with the source code,\n                # reset the ConvTranspose2d initialization parameters\n                m.reset_parameters()\n                # Simulated bilinear upsampling kernel\n                w = m.weight.data\n                f = math.ceil(w.size(2) / 2)\n                c = (2 * f - 1 - f % 2) / (2. 
* f)\n                for i in range(w.size(2)):\n                    for j in range(w.size(3)):\n                        w[0, 0, i, j] = \\\n                            (1 - math.fabs(i / f - c)) * (\n                                    1 - math.fabs(j / f - c))\n                for c in range(1, w.size(0)):\n                    w[c, 0, :, :] = w[0, 0, :, :]\n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            # self.use_dcn is False\n            elif not self.use_dcn and isinstance(m, nn.Conv2d):\n                # In order to be consistent with the source code,\n                # reset the Conv2d initialization parameters\n                m.reset_parameters()\n\n    def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:\n        \"\"\"model forward.\"\"\"\n        assert isinstance(x, (list, tuple))\n        outs = self.deconv_layers(x[-1])\n        return outs,\n"
  },
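The kernel written by `CTResNetNeck.init_weights` is the standard bilinear-interpolation weight for a stride-2 transposed convolution. A standalone check of the formula (torch only, independent of mmdet), showing that a constant feature map stays constant in the interior after upsampling:

```python
import math

import torch
import torch.nn as nn

k = 4  # deconv kernel size used with stride 2
f = math.ceil(k / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
w = torch.zeros(1, 1, k, k)
for i in range(k):
    for j in range(k):
        w[0, 0, i, j] = (1 - abs(i / f - c)) * (1 - abs(j / f - c))

deconv = nn.ConvTranspose2d(1, 1, k, stride=2, padding=1, bias=False)
with torch.no_grad():
    deconv.weight.copy_(w)

x = torch.full((1, 1, 4, 4), 3.0)
# interior of the 8x8 output stays at 3.0; the one-pixel border is smaller
# because some kernel taps fall outside the input
print(deconv(x))
```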
  {
    "path": "mmdet/models/necks/dilated_encoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, is_norm\nfrom mmengine.model import caffe2_xavier_init, constant_init, normal_init\nfrom torch.nn import BatchNorm2d\n\nfrom mmdet.registry import MODELS\n\n\nclass Bottleneck(nn.Module):\n    \"\"\"Bottleneck block for DilatedEncoder used in `YOLOF.\n\n    <https://arxiv.org/abs/2103.09460>`.\n\n    The Bottleneck contains three ConvLayers and one residual connection.\n\n    Args:\n        in_channels (int): The number of input channels.\n        mid_channels (int): The number of middle output channels.\n        dilation (int): Dilation rate.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 mid_channels,\n                 dilation,\n                 norm_cfg=dict(type='BN', requires_grad=True)):\n        super(Bottleneck, self).__init__()\n        self.conv1 = ConvModule(\n            in_channels, mid_channels, 1, norm_cfg=norm_cfg)\n        self.conv2 = ConvModule(\n            mid_channels,\n            mid_channels,\n            3,\n            padding=dilation,\n            dilation=dilation,\n            norm_cfg=norm_cfg)\n        self.conv3 = ConvModule(\n            mid_channels, in_channels, 1, norm_cfg=norm_cfg)\n\n    def forward(self, x):\n        identity = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n        out = self.conv3(out)\n        out = out + identity\n        return out\n\n\n@MODELS.register_module()\nclass DilatedEncoder(nn.Module):\n    \"\"\"Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.\n\n    This module contains two types of components:\n        - the original FPN lateral convolution layer and fpn convolution layer,\n              which are 1x1 conv + 3x3 conv\n        - the dilated residual block\n\n    Args:\n        in_channels (int): The number of input channels.\n        out_channels (int): The number of output channels.\n        block_mid_channels (int): The number of middle block output channels\n        num_residual_blocks (int): The number of residual blocks.\n        block_dilations (list): The list of residual blocks dilation.\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, block_mid_channels,\n                 num_residual_blocks, block_dilations):\n        super(DilatedEncoder, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.block_mid_channels = block_mid_channels\n        self.num_residual_blocks = num_residual_blocks\n        self.block_dilations = block_dilations\n        self._init_layers()\n\n    def _init_layers(self):\n        self.lateral_conv = nn.Conv2d(\n            self.in_channels, self.out_channels, kernel_size=1)\n        self.lateral_norm = BatchNorm2d(self.out_channels)\n        self.fpn_conv = nn.Conv2d(\n            self.out_channels, self.out_channels, kernel_size=3, padding=1)\n        self.fpn_norm = BatchNorm2d(self.out_channels)\n        encoder_blocks = []\n        for i in range(self.num_residual_blocks):\n            dilation = self.block_dilations[i]\n            encoder_blocks.append(\n                Bottleneck(\n                    self.out_channels,\n                    self.block_mid_channels,\n                    dilation=dilation))\n        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)\n\n    def init_weights(self):\n        
caffe2_xavier_init(self.lateral_conv)\n        caffe2_xavier_init(self.fpn_conv)\n        for m in [self.lateral_norm, self.fpn_norm]:\n            constant_init(m, 1)\n        for m in self.dilated_encoder_blocks.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n    def forward(self, feature):\n        out = self.lateral_norm(self.lateral_conv(feature[-1]))\n        out = self.fpn_norm(self.fpn_conv(out))\n        return self.dilated_encoder_blocks(out),\n"
  },
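A usage sketch for `DilatedEncoder`, assuming mmdet and its dependencies are installed. The channel and dilation values below are illustrative, chosen in the style of the YOLOF configs rather than copied from one:

```python
import torch

from mmdet.models.necks.dilated_encoder import DilatedEncoder

neck = DilatedEncoder(
    in_channels=2048,        # backbone C5 channels (illustrative)
    out_channels=512,
    block_mid_channels=128,
    num_residual_blocks=4,
    block_dilations=[2, 4, 6, 8])
neck.init_weights()

c5 = torch.rand(1, 2048, 16, 16)
out, = neck([c5])            # forward only consumes the last input level
print(out.shape)             # torch.Size([1, 512, 16, 16])
```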
  {
    "path": "mmdet/models/necks/dyhead.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import build_activation_layer, build_norm_layer\nfrom mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d\nfrom mmengine.model import BaseModule, constant_init, normal_init\n\nfrom mmdet.registry import MODELS\nfrom ..layers import DyReLU\n\n# Reference:\n# https://github.com/microsoft/DynamicHead\n# https://github.com/jshilong/SEPC\n\n\nclass DyDCNv2(nn.Module):\n    \"\"\"ModulatedDeformConv2d with normalization layer used in DyHead.\n\n    This module cannot be configured with `conv_cfg=dict(type='DCNv2')`\n    because DyHead calculates offset and mask from middle-level feature.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        stride (int | tuple[int], optional): Stride of the convolution.\n            Default: 1.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: dict(type='GN', num_groups=16, requires_grad=True).\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 stride=1,\n                 norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)):\n        super().__init__()\n        self.with_norm = norm_cfg is not None\n        bias = not self.with_norm\n        self.conv = ModulatedDeformConv2d(\n            in_channels, out_channels, 3, stride=stride, padding=1, bias=bias)\n        if self.with_norm:\n            self.norm = build_norm_layer(norm_cfg, out_channels)[1]\n\n    def forward(self, x, offset, mask):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv(x.contiguous(), offset, mask)\n        if self.with_norm:\n            x = self.norm(x)\n        return x\n\n\nclass DyHeadBlock(nn.Module):\n    \"\"\"DyHead Block with three types of attention.\n\n    HSigmoid arguments in default act_cfg follow official code, not paper.\n    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        zero_init_offset (bool, optional): Whether to use zero init for\n            `spatial_conv_offset`. Default: True.\n        act_cfg (dict, optional): Config dict for the last activation layer of\n            scale-aware attention. 
Default: dict(type='HSigmoid', bias=3.0,\n            divisor=6.0).\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 zero_init_offset=True,\n                 act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)):\n        super().__init__()\n        self.zero_init_offset = zero_init_offset\n        # (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x\n        self.offset_and_mask_dim = 3 * 3 * 3\n        self.offset_dim = 2 * 3 * 3\n\n        self.spatial_conv_high = DyDCNv2(in_channels, out_channels)\n        self.spatial_conv_mid = DyDCNv2(in_channels, out_channels)\n        self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2)\n        self.spatial_conv_offset = nn.Conv2d(\n            in_channels, self.offset_and_mask_dim, 3, padding=1)\n        self.scale_attn_module = nn.Sequential(\n            nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1),\n            nn.ReLU(inplace=True), build_activation_layer(act_cfg))\n        self.task_attn_module = DyReLU(out_channels)\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, 0, 0.01)\n        if self.zero_init_offset:\n            constant_init(self.spatial_conv_offset, 0)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = []\n        for level in range(len(x)):\n            # calculate offset and mask of DCNv2 from middle-level feature\n            offset_and_mask = self.spatial_conv_offset(x[level])\n            offset = offset_and_mask[:, :self.offset_dim, :, :]\n            mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid()\n\n            mid_feat = self.spatial_conv_mid(x[level], offset, mask)\n            sum_feat = mid_feat * self.scale_attn_module(mid_feat)\n            summed_levels = 1\n            if level > 0:\n                low_feat = self.spatial_conv_low(x[level - 1], offset, mask)\n                sum_feat += low_feat * self.scale_attn_module(low_feat)\n                summed_levels += 1\n            if level < len(x) - 1:\n                # this upsample order is weird, but faster than natural order\n                # https://github.com/microsoft/DynamicHead/issues/25\n                high_feat = F.interpolate(\n                    self.spatial_conv_high(x[level + 1], offset, mask),\n                    size=x[level].shape[-2:],\n                    mode='bilinear',\n                    align_corners=True)\n                sum_feat += high_feat * self.scale_attn_module(high_feat)\n                summed_levels += 1\n            outs.append(self.task_attn_module(sum_feat / summed_levels))\n\n        return outs\n\n\n@MODELS.register_module()\nclass DyHead(BaseModule):\n    \"\"\"DyHead neck consisting of multiple DyHead Blocks.\n\n    See `Dynamic Head: Unifying Object Detection Heads with Attentions\n    <https://arxiv.org/abs/2106.08322>`_ for details.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        num_blocks (int, optional): Number of DyHead Blocks. Default: 6.\n        zero_init_offset (bool, optional): Whether to use zero init for\n            `spatial_conv_offset`. 
Default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_blocks=6,\n                 zero_init_offset=True,\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_blocks = num_blocks\n        self.zero_init_offset = zero_init_offset\n\n        dyhead_blocks = []\n        for i in range(num_blocks):\n            in_channels = self.in_channels if i == 0 else self.out_channels\n            dyhead_blocks.append(\n                DyHeadBlock(\n                    in_channels,\n                    self.out_channels,\n                    zero_init_offset=zero_init_offset))\n        self.dyhead_blocks = nn.Sequential(*dyhead_blocks)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert isinstance(inputs, (tuple, list))\n        outs = self.dyhead_blocks(inputs)\n        return tuple(outs)\n"
  },
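`DyHeadBlock` predicts 27 channels per location with a single 3x3 conv and splits them into 18 offset channels and 9 sigmoid mask channels for a 3x3 modulated deformable convolution. The sketch below reproduces only that bookkeeping, using torchvision's `deform_conv2d` as a stand-in for mmcv's `ModulatedDeformConv2d`; it is an illustration, not the DyHead code path.

```python
import torch
import torch.nn as nn
from torchvision.ops import deform_conv2d

in_ch, out_ch = 16, 16
offset_and_mask_dim = 3 * 3 * 3   # (offset_x, offset_y, mask) per kernel location
offset_dim = 2 * 3 * 3

offset_conv = nn.Conv2d(in_ch, offset_and_mask_dim, 3, padding=1)
weight = torch.randn(out_ch, in_ch, 3, 3)

x = torch.rand(2, in_ch, 20, 20)
offset_and_mask = offset_conv(x)
offset = offset_and_mask[:, :offset_dim]          # 18 channels
mask = offset_and_mask[:, offset_dim:].sigmoid()  # 9 channels

y = deform_conv2d(x, offset, weight, padding=1, mask=mask)
print(y.shape)  # torch.Size([2, 16, 20, 20])
```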
  {
    "path": "mmdet/models/necks/fpg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\n\n\nclass Transition(BaseModule):\n    \"\"\"Base class for transition.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, init_cfg=None):\n        super().__init__(init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(x):\n        pass\n\n\nclass UpInterpolationConv(Transition):\n    \"\"\"A transition used for up-sampling.\n\n    Up-sample the input by interpolation then refines the feature by\n    a convolution layer.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        scale_factor (int): Up-sampling factor. Default: 2.\n        mode (int): Interpolation mode. Default: nearest.\n        align_corners (bool): Whether align corners when interpolation.\n            Default: None.\n        kernel_size (int): Kernel size for the conv. Default: 3.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 scale_factor=2,\n                 mode='nearest',\n                 align_corners=None,\n                 kernel_size=3,\n                 init_cfg=None,\n                 **kwargs):\n        super().__init__(in_channels, out_channels, init_cfg)\n        self.mode = mode\n        self.scale_factor = scale_factor\n        self.align_corners = align_corners\n        self.conv = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size,\n            padding=(kernel_size - 1) // 2,\n            **kwargs)\n\n    def forward(self, x):\n        x = F.interpolate(\n            x,\n            scale_factor=self.scale_factor,\n            mode=self.mode,\n            align_corners=self.align_corners)\n        x = self.conv(x)\n        return x\n\n\nclass LastConv(Transition):\n    \"\"\"A transition used for refining the output of the last stage.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        num_inputs (int): Number of inputs of the FPN features.\n        kernel_size (int): Kernel size for the conv. 
Default: 3.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_inputs,\n                 kernel_size=3,\n                 init_cfg=None,\n                 **kwargs):\n        super().__init__(in_channels, out_channels, init_cfg)\n        self.num_inputs = num_inputs\n        self.conv_out = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size,\n            padding=(kernel_size - 1) // 2,\n            **kwargs)\n\n    def forward(self, inputs):\n        assert len(inputs) == self.num_inputs\n        return self.conv_out(inputs[-1])\n\n\n@MODELS.register_module()\nclass FPG(BaseModule):\n    \"\"\"FPG.\n\n    Implementation of `Feature Pyramid Grids (FPG)\n    <https://arxiv.org/abs/2004.03580>`_.\n    This implementation only gives the basic structure stated in the paper.\n    But users can implement different types of transitions to fully explore\n    the potential power of the structure of FPG.\n\n    Args:\n        in_channels (list[int]): Number of input channels (feature maps of all\n            levels should have the same channels).\n        out_channels (int): Number of output channels (used at each scale).\n        num_outs (int): Number of output scales.\n        stack_times (int): The number of times the pyramid architecture will\n            be stacked.\n        paths (list[str]): Specify the path order of each stack level.\n            Each element in the list should be either 'bu' (bottom-up) or\n            'td' (top-down).\n        inter_channels (int): Number of intermediate channels.\n        same_up_trans (dict): Transition that goes down at the same stage.\n        same_down_trans (dict): Transition that goes up at the same stage.\n        across_lateral_trans (dict): Across-pathway same-stage connection.\n        across_down_trans (dict): Across-pathway bottom-up connection.\n        across_up_trans (dict): Across-pathway top-down connection.\n        across_skip_trans (dict): Across-pathway skip connection.\n        output_trans (dict): Transition that transforms the output of the\n            last stage.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool): It decides whether to add conv\n            layers on top of the original feature maps. Defaults to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    transition_types = {\n        'conv': ConvModule,\n        'interpolation_conv': UpInterpolationConv,\n        'last_conv': LastConv,\n    }\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 stack_times,\n                 paths,\n                 inter_channels=None,\n                 same_down_trans=None,\n                 same_up_trans=dict(\n                     type='conv', kernel_size=3, stride=2, padding=1),\n                 across_lateral_trans=dict(type='conv', kernel_size=1),\n                 across_down_trans=dict(type='conv', kernel_size=3),\n                 across_up_trans=None,\n                 across_skip_trans=dict(type='identity'),\n                 output_trans=dict(type='last_conv', kernel_size=3),\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 norm_cfg=None,\n                 skip_inds=None,\n                 init_cfg=[\n                     dict(type='Caffe2Xavier', layer='Conv2d'),\n                     dict(\n                         type='Constant',\n                         layer=[\n                             '_BatchNorm', '_InstanceNorm', 'GroupNorm',\n                             'LayerNorm'\n                         ],\n                         val=1.0)\n                 ]):\n        super(FPG, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        if inter_channels is None:\n            self.inter_channels = [out_channels for _ in range(num_outs)]\n        elif isinstance(inter_channels, int):\n            self.inter_channels = [inter_channels for _ in range(num_outs)]\n        else:\n            assert isinstance(inter_channels, list)\n            assert len(inter_channels) == num_outs\n            self.inter_channels = inter_channels\n        self.stack_times = stack_times\n        self.paths = paths\n        assert isinstance(paths, list) and len(paths) == stack_times\n        for d in paths:\n            assert d in ('bu', 'td')\n\n        self.same_down_trans = same_down_trans\n        self.same_up_trans = same_up_trans\n        self.across_lateral_trans = across_lateral_trans\n        self.across_down_trans = across_down_trans\n        self.across_up_trans = across_up_trans\n        self.output_trans = output_trans\n        self.across_skip_trans = across_skip_trans\n\n        self.with_bias = norm_cfg is None\n        # skip inds must be specified if across skip trans is not None\n        if self.across_skip_trans is not None:\n            assert skip_inds is not None\n        self.skip_inds = skip_inds\n        assert len(self.skip_inds[0]) <= self.stack_times\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n\n       
 # build lateral 1x1 convs to reduce channels\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = nn.Conv2d(self.in_channels[i],\n                               self.inter_channels[i - self.start_level], 1)\n            self.lateral_convs.append(l_conv)\n\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            if self.add_extra_convs:\n                fpn_idx = self.backbone_end_level - self.start_level + i\n                extra_conv = nn.Conv2d(\n                    self.inter_channels[fpn_idx - 1],\n                    self.inter_channels[fpn_idx],\n                    3,\n                    stride=2,\n                    padding=1)\n                self.extra_downsamples.append(extra_conv)\n            else:\n                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))\n\n        self.fpn_transitions = nn.ModuleList()  # stack times\n        for s in range(self.stack_times):\n            stage_trans = nn.ModuleList()  # num of feature levels\n            for i in range(self.num_outs):\n                # same, across_lateral, across_down, across_up\n                trans = nn.ModuleDict()\n                if s in self.skip_inds[i]:\n                    stage_trans.append(trans)\n                    continue\n                # build same-stage down trans (used in bottom-up paths)\n                if i == 0 or self.same_up_trans is None:\n                    same_up_trans = None\n                else:\n                    same_up_trans = self.build_trans(\n                        self.same_up_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['same_up'] = same_up_trans\n                # build same-stage up trans (used in top-down paths)\n                if i == self.num_outs - 1 or self.same_down_trans is None:\n                    same_down_trans = None\n                else:\n                    same_down_trans = self.build_trans(\n                        self.same_down_trans, self.inter_channels[i + 1],\n                        self.inter_channels[i])\n                trans['same_down'] = same_down_trans\n                # build across lateral trans\n                across_lateral_trans = self.build_trans(\n                    self.across_lateral_trans, self.inter_channels[i],\n                    self.inter_channels[i])\n                trans['across_lateral'] = across_lateral_trans\n                # build across down trans\n                if i == self.num_outs - 1 or self.across_down_trans is None:\n                    across_down_trans = None\n                else:\n                    across_down_trans = self.build_trans(\n                        self.across_down_trans, self.inter_channels[i + 1],\n                        self.inter_channels[i])\n                trans['across_down'] = across_down_trans\n                # build across up trans\n                if i == 0 or self.across_up_trans is None:\n                    across_up_trans = None\n                else:\n                    across_up_trans = self.build_trans(\n                        self.across_up_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['across_up'] = across_up_trans\n                if self.across_skip_trans is None:\n                    across_skip_trans = None\n             
   else:\n                    across_skip_trans = self.build_trans(\n                        self.across_skip_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['across_skip'] = across_skip_trans\n                # build across_skip trans\n                stage_trans.append(trans)\n            self.fpn_transitions.append(stage_trans)\n\n        self.output_transition = nn.ModuleList()  # output levels\n        for i in range(self.num_outs):\n            trans = self.build_trans(\n                self.output_trans,\n                self.inter_channels[i],\n                self.out_channels,\n                num_inputs=self.stack_times + 1)\n            self.output_transition.append(trans)\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def build_trans(self, cfg, in_channels, out_channels, **extra_args):\n        cfg_ = cfg.copy()\n        trans_type = cfg_.pop('type')\n        trans_cls = self.transition_types[trans_type]\n        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)\n\n    def fuse(self, fuse_dict):\n        out = None\n        for item in fuse_dict.values():\n            if item is not None:\n                if out is None:\n                    out = item\n                else:\n                    out = out + item\n        return out\n\n    def forward(self, inputs):\n        assert len(inputs) == len(self.in_channels)\n\n        # build all levels from original feature maps\n        feats = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n        for downsample in self.extra_downsamples:\n            feats.append(downsample(feats[-1]))\n\n        outs = [feats]\n\n        for i in range(self.stack_times):\n            current_outs = outs[-1]\n            next_outs = []\n            direction = self.paths[i]\n            for j in range(self.num_outs):\n                if i in self.skip_inds[j]:\n                    next_outs.append(outs[-1][j])\n                    continue\n                # feature level\n                if direction == 'td':\n                    lvl = self.num_outs - j - 1\n                else:\n                    lvl = j\n                # get transitions\n                if direction == 'td':\n                    same_trans = self.fpn_transitions[i][lvl]['same_down']\n                else:\n                    same_trans = self.fpn_transitions[i][lvl]['same_up']\n                across_lateral_trans = self.fpn_transitions[i][lvl][\n                    'across_lateral']\n                across_down_trans = self.fpn_transitions[i][lvl]['across_down']\n                across_up_trans = self.fpn_transitions[i][lvl]['across_up']\n                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']\n                # init output\n                to_fuse = dict(\n                    same=None, lateral=None, across_up=None, across_down=None)\n                # same downsample/upsample\n                if same_trans is not None:\n                    to_fuse['same'] = same_trans(next_outs[-1])\n                # across lateral\n                if across_lateral_trans is not None:\n                    to_fuse['lateral'] = across_lateral_trans(\n                        current_outs[lvl])\n                # across downsample\n                if lvl > 0 and across_up_trans is not None:\n                    to_fuse['across_up'] = across_up_trans(current_outs[lvl -\n                               
                                         1])\n                # across upsample\n                if (lvl < self.num_outs - 1 and across_down_trans is not None):\n                    to_fuse['across_down'] = across_down_trans(\n                        current_outs[lvl + 1])\n                if across_skip_trans is not None:\n                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])\n                x = self.fuse(to_fuse)\n                next_outs.append(x)\n\n            if direction == 'td':\n                outs.append(next_outs[::-1])\n            else:\n                outs.append(next_outs)\n\n        # output trans\n        final_outs = []\n        for i in range(self.num_outs):\n            lvl_out_list = []\n            for s in range(len(outs)):\n                lvl_out_list.append(outs[s][i])\n            lvl_out = self.output_transition[i](lvl_out_list)\n            final_outs.append(lvl_out)\n\n        return final_outs\n"
  },
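`FPG.build_trans` turns a config dict into a transition module by looking up its `type` key in `transition_types` and passing the remaining keys on as keyword arguments. A toy standalone version of that pattern, with a hypothetical registry and class rather than FPG itself:

```python
import torch
import torch.nn as nn


class ConvTransition(nn.Module):
    """Illustrative stand-in for the ConvModule-based transitions."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=(kernel_size - 1) // 2)

    def forward(self, x):
        return self.conv(x)


transition_types = {'conv': ConvTransition}


def build_trans(cfg, in_channels, out_channels, **extra_args):
    cfg_ = cfg.copy()
    trans_cls = transition_types[cfg_.pop('type')]
    return trans_cls(in_channels, out_channels, **cfg_, **extra_args)


same_up = build_trans(dict(type='conv', kernel_size=3, stride=2), 64, 64)
print(same_up(torch.rand(1, 64, 32, 32)).shape)  # torch.Size([1, 64, 16, 16])
```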
  {
    "path": "mmdet/models/necks/fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass FPN(BaseModule):\n    r\"\"\"Feature Pyramid Network.\n\n    This is an implementation of paper `Feature Pyramid Networks for Object\n    Detection <https://arxiv.org/abs/1612.03144>`_.\n\n    Args:\n        in_channels (list[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale).\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Defaults to 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Defaults to -1, which means the\n            last level.\n        add_extra_convs (bool | str): If bool, it decides whether to add conv\n            layers on top of the original feature maps. Defaults to False.\n            If True, it is equivalent to `add_extra_convs='on_input'`.\n            If str, it specifies the source feature map of the extra convs.\n            Only the following options are allowed\n\n            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n            - 'on_lateral': Last feature map after lateral convs.\n            - 'on_output': The last output feature map after fpn convs.\n        relu_before_extra_convs (bool): Whether to apply relu before the extra\n            conv. Defaults to False.\n        no_norm_on_lateral (bool): Whether to apply norm on lateral.\n            Defaults to False.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            normalization layer. Defaults to None.\n        act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            activation layer in ConvModule. Defaults to None.\n        upsample_cfg (:obj:`ConfigDict` or dict, optional): Config dict\n            for interpolate layer. Defaults to dict(mode='nearest').\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n\n    Example:\n        >>> import torch\n        >>> in_channels = [2, 3, 5, 7]\n        >>> scales = [340, 170, 84, 43]\n        >>> inputs = [torch.rand(1, c, s, s)\n        ...           for c, s in zip(in_channels, scales)]\n        >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n        >>> outputs = self.forward(inputs)\n        >>> for i in range(len(outputs)):\n        ...     
print(f'outputs[{i}].shape = {outputs[i].shape}')\n        outputs[0].shape = torch.Size([1, 11, 340, 340])\n        outputs[1].shape = torch.Size([1, 11, 170, 170])\n        outputs[2].shape = torch.Size([1, 11, 84, 84])\n        outputs[3].shape = torch.Size([1, 11, 43, 43])\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: List[int],\n        out_channels: int,\n        num_outs: int,\n        start_level: int = 0,\n        end_level: int = -1,\n        add_extra_convs: Union[bool, str] = False,\n        relu_before_extra_convs: bool = False,\n        no_norm_on_lateral: bool = False,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        act_cfg: OptConfigType = None,\n        upsample_cfg: ConfigType = dict(mode='nearest'),\n        init_cfg: MultiConfig = dict(\n            type='Xavier', layer='Conv2d', distribution='uniform')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.relu_before_extra_convs = relu_before_extra_convs\n        self.no_norm_on_lateral = no_norm_on_lateral\n        self.fp16_enabled = False\n        self.upsample_cfg = upsample_cfg.copy()\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n        assert isinstance(add_extra_convs, (str, bool))\n        if isinstance(add_extra_convs, str):\n            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'\n            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')\n        elif add_extra_convs:  # True\n            self.add_extra_convs = 'on_input'\n\n        self.lateral_convs = nn.ModuleList()\n        self.fpn_convs = nn.ModuleList()\n\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n                act_cfg=act_cfg,\n                inplace=False)\n            fpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n\n            self.lateral_convs.append(l_conv)\n            self.fpn_convs.append(fpn_conv)\n\n        # add extra conv layers (e.g., RetinaNet)\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        if self.add_extra_convs and extra_levels >= 1:\n            for i in range(extra_levels):\n                if i == 0 and self.add_extra_convs == 'on_input':\n                    in_channels = self.in_channels[self.backbone_end_level - 1]\n                else:\n                    in_channels = out_channels\n    
            extra_fpn_conv = ConvModule(\n                    in_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg,\n                    inplace=False)\n                self.fpn_convs.append(extra_fpn_conv)\n\n    def forward(self, inputs: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward function.\n\n        Args:\n            inputs (tuple[Tensor]): Features from the upstream network, each\n                is a 4D-tensor.\n\n        Returns:\n            tuple: Feature maps, each is a 4D-tensor.\n        \"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n\n        # build top-down path\n        used_backbone_levels = len(laterals)\n        for i in range(used_backbone_levels - 1, 0, -1):\n            # In some cases, fixing `scale factor` (e.g. 2) is preferred, but\n            #  it cannot co-exist with `size` in `F.interpolate`.\n            if 'scale_factor' in self.upsample_cfg:\n                # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n                laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                    laterals[i], **self.upsample_cfg)\n            else:\n                prev_shape = laterals[i - 1].shape[2:]\n                laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                    laterals[i], size=prev_shape, **self.upsample_cfg)\n\n        # build outputs\n        # part 1: from original levels\n        outs = [\n            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n        ]\n        # part 2: add extra levels\n        if self.num_outs > len(outs):\n            # use max pool to get more levels on top of outputs\n            # (e.g., Faster R-CNN, Mask R-CNN)\n            if not self.add_extra_convs:\n                for i in range(self.num_outs - used_backbone_levels):\n                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n            # add conv layers on top of original feature maps (RetinaNet)\n            else:\n                if self.add_extra_convs == 'on_input':\n                    extra_source = inputs[self.backbone_end_level - 1]\n                elif self.add_extra_convs == 'on_lateral':\n                    extra_source = laterals[-1]\n                elif self.add_extra_convs == 'on_output':\n                    extra_source = outs[-1]\n                else:\n                    raise NotImplementedError\n                outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n                for i in range(used_backbone_levels + 1, self.num_outs):\n                    if self.relu_before_extra_convs:\n                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n                    else:\n                        outs.append(self.fpn_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
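A quick check of the extra-level behavior (assuming mmdet is installed): when `num_outs` exceeds the number of backbone levels used, the extra maps come from stride-2 max pooling if `add_extra_convs` is left False, and from stride-2 convolutions on the chosen source (here the last backbone feature, as in RetinaNet) otherwise. The shapes in the comments follow from the illustrative input sizes.

```python
import torch

from mmdet.models.necks import FPN

in_channels = [256, 512, 1024, 2048]
feats = [
    torch.rand(1, c, s, s)
    for c, s in zip(in_channels, (64, 32, 16, 8))
]

pool_fpn = FPN(in_channels, 256, num_outs=5)
conv_fpn = FPN(in_channels, 256, num_outs=5, start_level=1,
               add_extra_convs='on_input', relu_before_extra_convs=True)

print([o.shape[-1] for o in pool_fpn(feats)])  # [64, 32, 16, 8, 4]
print([o.shape[-1] for o in conv_fpn(feats)])  # [32, 16, 8, 4, 2]
```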
  {
    "path": "mmdet/models/necks/fpn_carafe.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, build_upsample_layer\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmengine.model import BaseModule, ModuleList, xavier_init\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass FPN_CARAFE(BaseModule):\n    \"\"\"FPN_CARAFE is a more flexible implementation of FPN. It allows more\n    choice for upsample methods during the top-down pathway.\n\n    It can reproduce the performance of ICCV 2019 paper\n    CARAFE: Content-Aware ReAssembly of FEatures\n    Please refer to https://arxiv.org/abs/1905.02188 for more details.\n\n    Args:\n        in_channels (list[int]): Number of channels for each input feature map.\n        out_channels (int): Output channels of feature pyramids.\n        num_outs (int): Number of output stages.\n        start_level (int): Start level of feature pyramids.\n            (Default: 0)\n        end_level (int): End level of feature pyramids.\n            (Default: -1 indicates the last level).\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        activate (str): Type of activation function in ConvModule\n            (Default: None indicates w/o activation).\n        order (dict): Order of components in ConvModule.\n        upsample (str): Type of upsample layer.\n        upsample_cfg (dict): Dictionary to construct and config upsample layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=0,\n                 end_level=-1,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 order=('conv', 'norm', 'act'),\n                 upsample_cfg=dict(\n                     type='carafe',\n                     up_kernel=5,\n                     up_group=1,\n                     encoder_kernel=3,\n                     encoder_dilation=1),\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(FPN_CARAFE, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.with_bias = norm_cfg is None\n        self.upsample_cfg = upsample_cfg.copy()\n        self.upsample = self.upsample_cfg.get('type')\n        self.relu = nn.ReLU(inplace=False)\n\n        self.order = order\n        assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]\n\n        assert self.upsample in [\n            'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None\n        ]\n        if self.upsample in ['deconv', 'pixel_shuffle']:\n            assert hasattr(\n                self.upsample_cfg,\n                'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0\n            self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no 
extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n\n        self.lateral_convs = ModuleList()\n        self.fpn_convs = ModuleList()\n        self.upsample_modules = ModuleList()\n\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                norm_cfg=norm_cfg,\n                bias=self.with_bias,\n                act_cfg=act_cfg,\n                inplace=False,\n                order=self.order)\n            fpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                norm_cfg=self.norm_cfg,\n                bias=self.with_bias,\n                act_cfg=act_cfg,\n                inplace=False,\n                order=self.order)\n            if i != self.backbone_end_level - 1:\n                upsample_cfg_ = self.upsample_cfg.copy()\n                if self.upsample == 'deconv':\n                    upsample_cfg_.update(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        kernel_size=self.upsample_kernel,\n                        stride=2,\n                        padding=(self.upsample_kernel - 1) // 2,\n                        output_padding=(self.upsample_kernel - 1) // 2)\n                elif self.upsample == 'pixel_shuffle':\n                    upsample_cfg_.update(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        scale_factor=2,\n                        upsample_kernel=self.upsample_kernel)\n                elif self.upsample == 'carafe':\n                    upsample_cfg_.update(channels=out_channels, scale_factor=2)\n                else:\n                    # suppress warnings\n                    align_corners = (None\n                                     if self.upsample == 'nearest' else False)\n                    upsample_cfg_.update(\n                        scale_factor=2,\n                        mode=self.upsample,\n                        align_corners=align_corners)\n                upsample_module = build_upsample_layer(upsample_cfg_)\n                self.upsample_modules.append(upsample_module)\n            self.lateral_convs.append(l_conv)\n            self.fpn_convs.append(fpn_conv)\n\n        # add extra conv layers (e.g., RetinaNet)\n        extra_out_levels = (\n            num_outs - self.backbone_end_level + self.start_level)\n        if extra_out_levels >= 1:\n            for i in range(extra_out_levels):\n                in_channels = (\n                    self.in_channels[self.backbone_end_level -\n                                     1] if i == 0 else out_channels)\n                extra_l_conv = ConvModule(\n                    in_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    norm_cfg=norm_cfg,\n                    bias=self.with_bias,\n                    act_cfg=act_cfg,\n                    inplace=False,\n                    order=self.order)\n                if self.upsample == 'deconv':\n                    upsampler_cfg_ = dict(\n                        
in_channels=out_channels,\n                        out_channels=out_channels,\n                        kernel_size=self.upsample_kernel,\n                        stride=2,\n                        padding=(self.upsample_kernel - 1) // 2,\n                        output_padding=(self.upsample_kernel - 1) // 2)\n                elif self.upsample == 'pixel_shuffle':\n                    upsampler_cfg_ = dict(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        scale_factor=2,\n                        upsample_kernel=self.upsample_kernel)\n                elif self.upsample == 'carafe':\n                    upsampler_cfg_ = dict(\n                        channels=out_channels,\n                        scale_factor=2,\n                        **self.upsample_cfg)\n                else:\n                    # suppress warnings\n                    align_corners = (None\n                                     if self.upsample == 'nearest' else False)\n                    upsampler_cfg_ = dict(\n                        scale_factor=2,\n                        mode=self.upsample,\n                        align_corners=align_corners)\n                upsampler_cfg_['type'] = self.upsample\n                upsample_module = build_upsample_layer(upsampler_cfg_)\n                extra_fpn_conv = ConvModule(\n                    out_channels,\n                    out_channels,\n                    3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.with_bias,\n                    act_cfg=act_cfg,\n                    inplace=False,\n                    order=self.order)\n                self.upsample_modules.append(upsample_module)\n                self.fpn_convs.append(extra_fpn_conv)\n                self.lateral_convs.append(extra_l_conv)\n\n    # default init_weights for conv(msra) and norm in ConvModule\n    def init_weights(self):\n        \"\"\"Initialize the weights of module.\"\"\"\n        super(FPN_CARAFE, self).init_weights()\n        for m in self.modules():\n            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n                xavier_init(m, distribution='uniform')\n        for m in self.modules():\n            if isinstance(m, CARAFEPack):\n                m.init_weights()\n\n    def slice_as(self, src, dst):\n        \"\"\"Slice ``src`` as ``dst``\n\n        Note:\n            ``src`` should have the same or larger size than ``dst``.\n\n        Args:\n            src (torch.Tensor): Tensors to be sliced.\n            dst (torch.Tensor): ``src`` will be sliced to have the same\n                size as ``dst``.\n\n        Returns:\n            torch.Tensor: Sliced tensor.\n        \"\"\"\n        assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))\n        if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):\n            return src\n        else:\n            return src[:, :, :dst.size(2), :dst.size(3)]\n\n    def tensor_add(self, a, b):\n        \"\"\"Add tensors ``a`` and ``b`` that might have different sizes.\"\"\"\n        if a.size() == b.size():\n            c = a + b\n        else:\n            c = a + self.slice_as(b, a)\n        return c\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = []\n        for i, lateral_conv in enumerate(self.lateral_convs):\n            if i <= 
self.backbone_end_level - self.start_level:\n                input = inputs[min(i + self.start_level, len(inputs) - 1)]\n            else:\n                input = laterals[-1]\n            lateral = lateral_conv(input)\n            laterals.append(lateral)\n\n        # build top-down path\n        for i in range(len(laterals) - 1, 0, -1):\n            if self.upsample is not None:\n                upsample_feat = self.upsample_modules[i - 1](laterals[i])\n            else:\n                upsample_feat = laterals[i]\n            laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)\n\n        # build outputs\n        num_conv_outs = len(self.fpn_convs)\n        outs = []\n        for i in range(num_conv_outs):\n            out = self.fpn_convs[i](laterals[i])\n            outs.append(out)\n        return tuple(outs)\n"
  },
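`slice_as` and `tensor_add` exist because a fixed 2x upsample can overshoot the next level by one pixel when feature sizes are odd, so the upsampled map is cropped back before the residual add. A standalone illustration of that corner case (plain torch, not the FPN_CARAFE code path):

```python
import torch
import torch.nn.functional as F


def slice_as(src, dst):
    """Crop ``src`` at the bottom/right so it matches ``dst`` spatially."""
    return src[:, :, :dst.size(2), :dst.size(3)]


lateral_c4 = torch.rand(1, 256, 13, 13)  # e.g. an odd-sized stride-16 level
lateral_c5 = torch.rand(1, 256, 7, 7)    # the stride-32 level above it

upsampled = F.interpolate(lateral_c5, scale_factor=2, mode='nearest')
print(upsampled.shape)                   # torch.Size([1, 256, 14, 14])

fused = lateral_c4 + slice_as(upsampled, lateral_c4)
print(fused.shape)                       # torch.Size([1, 256, 13, 13])
```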
  {
    "path": "mmdet/models/necks/hrfpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch.utils.checkpoint import checkpoint\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass HRFPN(BaseModule):\n    \"\"\"HRFPN (High Resolution Feature Pyramids)\n\n    paper: `High-Resolution Representations for Labeling Pixels and Regions\n    <https://arxiv.org/abs/1904.04514>`_.\n\n    Args:\n        in_channels (list): number of channels for each branch.\n        out_channels (int): output channels of feature pyramids.\n        num_outs (int): number of output stages.\n        pooling_type (str): pooling for generating feature pyramids\n            from {MAX, AVG}.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        with_cp  (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        stride (int): stride of 3x3 convolutional layers\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs=5,\n                 pooling_type='AVG',\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 with_cp=False,\n                 stride=1,\n                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):\n        super(HRFPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.with_cp = with_cp\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.reduction_conv = ConvModule(\n            sum(in_channels),\n            out_channels,\n            kernel_size=1,\n            conv_cfg=self.conv_cfg,\n            act_cfg=None)\n\n        self.fpn_convs = nn.ModuleList()\n        for i in range(self.num_outs):\n            self.fpn_convs.append(\n                ConvModule(\n                    out_channels,\n                    out_channels,\n                    kernel_size=3,\n                    padding=1,\n                    stride=stride,\n                    conv_cfg=self.conv_cfg,\n                    act_cfg=None))\n\n        if pooling_type == 'MAX':\n            self.pooling = F.max_pool2d\n        else:\n            self.pooling = F.avg_pool2d\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == self.num_ins\n        outs = [inputs[0]]\n        for i in range(1, self.num_ins):\n            outs.append(\n                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))\n        out = torch.cat(outs, dim=1)\n        if out.requires_grad and self.with_cp:\n            out = checkpoint(self.reduction_conv, out)\n        else:\n            out = self.reduction_conv(out)\n        outs = [out]\n        for i in range(1, self.num_outs):\n            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))\n        outputs = []\n\n        for i in range(self.num_outs):\n            if outs[i].requires_grad and self.with_cp:\n                tmp_out = checkpoint(self.fpn_convs[i], outs[i])\n            else:\n                tmp_out = 
self.fpn_convs[i](outs[i])\n            outputs.append(tmp_out)\n        return tuple(outputs)\n"
  },
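A usage sketch for `HRFPN`, assuming mmdet is installed: all branches are upsampled to the highest resolution, concatenated, reduced with a 1x1 conv, then pooled into `num_outs` pyramid levels. The branch widths follow the HRNetV2p-W18 convention but are only illustrative here.

```python
import torch

from mmdet.models.necks import HRFPN

in_channels = [18, 36, 72, 144]
neck = HRFPN(in_channels, out_channels=256, num_outs=5)

feats = [
    torch.rand(1, c, s, s)
    for c, s in zip(in_channels, (64, 32, 16, 8))
]
outs = neck(feats)
print([o.shape for o in outs])  # 256 channels at 64, 32, 16, 8 and 4 pixels
```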
  {
    "path": "mmdet/models/necks/nas_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops.merge_cells import GlobalPoolingCell, SumCell\nfrom mmengine.model import BaseModule, ModuleList\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass NASFPN(BaseModule):\n    \"\"\"NAS-FPN.\n\n    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture\n    for Object Detection <https://arxiv.org/abs/1904.07392>`_\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        stack_times (int): The number of times the pyramid architecture will\n            be stacked.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Defaults to 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Defaults to -1, which means the\n            last level.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            normalization layer. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: List[int],\n        out_channels: int,\n        num_outs: int,\n        stack_times: int,\n        start_level: int = 0,\n        end_level: int = -1,\n        norm_cfg: OptConfigType = None,\n        init_cfg: MultiConfig = dict(type='Caffe2Xavier', layer='Conv2d')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)  # num of input feature levels\n        self.num_outs = num_outs  # num of output feature levels\n        self.stack_times = stack_times\n        self.norm_cfg = norm_cfg\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n\n        # add lateral connections\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            self.lateral_convs.append(l_conv)\n\n        # add extra downsample layers (stride-2 pooling or conv)\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            extra_conv = ConvModule(\n                out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n            self.extra_downsamples.append(\n                nn.Sequential(extra_conv, nn.MaxPool2d(2, 
2)))\n\n        # add NAS FPN connections\n        self.fpn_stages = ModuleList()\n        for _ in range(self.stack_times):\n            stage = nn.ModuleDict()\n            # gp(p6, p4) -> p4_1\n            stage['gp_64_4'] = GlobalPoolingCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p4_1, p4) -> p4_2\n            stage['sum_44_4'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p4_2, p3) -> p3_out\n            stage['sum_43_3'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p3_out, p4_2) -> p4_out\n            stage['sum_34_4'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p5, gp(p4_out, p3_out)) -> p5_out\n            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)\n            stage['sum_55_5'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p7, gp(p5_out, p4_2)) -> p7_out\n            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)\n            stage['sum_77_7'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # gp(p7_out, p5_out) -> p6_out\n            stage['gp_75_6'] = GlobalPoolingCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            self.fpn_stages.append(stage)\n\n    def forward(self, inputs: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward function.\n\n         Args:\n            inputs (tuple[Tensor]): Features from the upstream network, each\n                is a 4D-tensor.\n\n        Returns:\n            tuple: Feature maps, each is a 4D-tensor.\n        \"\"\"\n        # build P3-P5\n        feats = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n        # build P6-P7 on top of P5\n        for downsample in self.extra_downsamples:\n            feats.append(downsample(feats[-1]))\n\n        p3, p4, p5, p6, p7 = feats\n\n        for stage in self.fpn_stages:\n            # gp(p6, p4) -> p4_1\n            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])\n            # sum(p4_1, p4) -> p4_2\n            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])\n            # sum(p4_2, p3) -> p3_out\n            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])\n            # sum(p3_out, p4_2) -> p4_out\n            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])\n            # sum(p5, gp(p4_out, p3_out)) -> p5_out\n            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])\n            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])\n            # sum(p7, gp(p5_out, p4_2)) -> p7_out\n            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])\n            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])\n            # gp(p7_out, p5_out) -> p6_out\n            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])\n\n        return p3, p4, p5, p6, p7\n"
  },
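  {
    "path": "examples/nas_fpn_usage_sketch.py",
    "content": "# Hypothetical usage sketch for NASFPN; this file is not part of the\n# upstream MMDetection source. It only shows how the neck defined in\n# mmdet/models/necks/nas_fpn.py could be instantiated directly (without the\n# MODELS registry) and run on dummy feature maps. All channel counts and\n# spatial sizes below are made-up demo values.\nimport torch\n\nfrom mmdet.models.necks.nas_fpn import NASFPN\n\nif __name__ == '__main__':\n    # Three backbone levels (e.g. C3-C5) stacked into a 5-level pyramid.\n    in_channels = [128, 256, 512]\n    scales = [32, 16, 8]\n    inputs = tuple(\n        torch.rand(1, c, s, s) for c, s in zip(in_channels, scales))\n    neck = NASFPN(\n        in_channels=in_channels,\n        out_channels=64,\n        num_outs=5,\n        stack_times=3)\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },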
  {
    "path": "mmdet/models/necks/nasfcos_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops.merge_cells import ConcatCell\nfrom mmengine.model import BaseModule, caffe2_xavier_init\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass NASFCOS_FPN(BaseModule):\n    \"\"\"FPN structure in NASFPN.\n\n    Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for\n    Object Detection <https://arxiv.org/abs/1906.04423>`_\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool): It decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=1,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(NASFCOS_FPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.norm_cfg = norm_cfg\n        self.conv_cfg = conv_cfg\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n\n        self.adapt_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            adapt_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                stride=1,\n                padding=0,\n                bias=False,\n                norm_cfg=dict(type='BN'),\n                act_cfg=dict(type='ReLU', inplace=False))\n            self.adapt_convs.append(adapt_conv)\n\n        # C2 is omitted according to the paper\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n\n        def build_concat_cell(with_input1_conv, with_input2_conv):\n            cell_conv_cfg 
= dict(\n                kernel_size=1, padding=0, bias=False, groups=out_channels)\n            return ConcatCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                with_out_conv=True,\n                out_conv_cfg=cell_conv_cfg,\n                out_norm_cfg=dict(type='BN'),\n                out_conv_order=('norm', 'act', 'conv'),\n                with_input1_conv=with_input1_conv,\n                with_input2_conv=with_input2_conv,\n                input_conv_cfg=conv_cfg,\n                input_norm_cfg=norm_cfg,\n                upsample_mode='nearest')\n\n        # Denote c3=f0, c4=f1, c5=f2 for convenience\n        self.fpn = nn.ModuleDict()\n        self.fpn['c22_1'] = build_concat_cell(True, True)\n        self.fpn['c22_2'] = build_concat_cell(True, True)\n        self.fpn['c32'] = build_concat_cell(True, False)\n        self.fpn['c02'] = build_concat_cell(True, False)\n        self.fpn['c42'] = build_concat_cell(True, True)\n        self.fpn['c36'] = build_concat_cell(True, True)\n        self.fpn['c61'] = build_concat_cell(True, True)  # f9\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            extra_act_cfg = None if i == 0 \\\n                else dict(type='ReLU', inplace=False)\n            self.extra_downsamples.append(\n                ConvModule(\n                    out_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    act_cfg=extra_act_cfg,\n                    order=('act', 'norm', 'conv')))\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        feats = [\n            adapt_conv(inputs[i + self.start_level])\n            for i, adapt_conv in enumerate(self.adapt_convs)\n        ]\n\n        for (i, module_name) in enumerate(self.fpn):\n            idx_1, idx_2 = int(module_name[1]), int(module_name[2])\n            res = self.fpn[module_name](feats[idx_1], feats[idx_2])\n            feats.append(res)\n\n        ret = []\n        for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]):  # add P3, P4, P5\n            feats1, feats2 = feats[idx], feats[5]\n            feats2_resize = F.interpolate(\n                feats2,\n                size=feats1.size()[2:],\n                mode='bilinear',\n                align_corners=False)\n\n            feats_sum = feats1 + feats2_resize\n            ret.append(\n                F.interpolate(\n                    feats_sum,\n                    size=inputs[input_idx].size()[2:],\n                    mode='bilinear',\n                    align_corners=False))\n\n        for submodule in self.extra_downsamples:\n            ret.append(submodule(ret[-1]))\n\n        return tuple(ret)\n\n    def init_weights(self):\n        \"\"\"Initialize the weights of module.\"\"\"\n        super(NASFCOS_FPN, self).init_weights()\n        for module in self.fpn.values():\n            if hasattr(module, 'out_conv'):\n                caffe2_xavier_init(module.out_conv.conv)\n\n        for modules in [\n                self.adapt_convs.modules(),\n                self.extra_downsamples.modules()\n        ]:\n            for module in modules:\n                if isinstance(module, nn.Conv2d):\n                    caffe2_xavier_init(module)\n"
  },
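  {
    "path": "examples/nasfcos_fpn_usage_sketch.py",
    "content": "# Hypothetical usage sketch for NASFCOS_FPN; this file is not part of the\n# upstream MMDetection source. It shows how the neck defined in\n# mmdet/models/necks/nasfcos_fpn.py could be built directly and run on dummy\n# inputs. With the default start_level=1, the lowest backbone level is\n# skipped, as in the paper. All shapes below are made-up demo values.\nimport torch\n\nfrom mmdet.models.necks.nasfcos_fpn import NASFCOS_FPN\n\nif __name__ == '__main__':\n    in_channels = [16, 32, 64, 128]\n    scales = [64, 32, 16, 8]\n    inputs = tuple(\n        torch.rand(1, c, s, s) for c, s in zip(in_channels, scales))\n    neck = NASFCOS_FPN(\n        in_channels=in_channels, out_channels=32, num_outs=5, start_level=1)\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },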
  {
    "path": "mmdet/models/necks/pafpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.registry import MODELS\nfrom .fpn import FPN\n\n\n@MODELS.register_module()\nclass PAFPN(FPN):\n    \"\"\"Path Aggregation Network for Instance Segmentation.\n\n    This is an implementation of the `PAFPN in Path Aggregation Network\n    <https://arxiv.org/abs/1803.01534>`_.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool | str): If bool, it decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, it is equivalent to `add_extra_convs='on_input'`.\n            If str, it specifies the source feature map of the extra convs.\n            Only the following options are allowed\n\n            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n            - 'on_lateral':  Last feature map after lateral convs.\n            - 'on_output': The last output feature map after fpn convs.\n        relu_before_extra_convs (bool): Whether to apply relu before the extra\n            conv. Default: False.\n        no_norm_on_lateral (bool): Whether to apply norm on lateral.\n            Default: False.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        act_cfg (str): Config dict for activation layer in ConvModule.\n            Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 relu_before_extra_convs=False,\n                 no_norm_on_lateral=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super(PAFPN, self).__init__(\n            in_channels,\n            out_channels,\n            num_outs,\n            start_level,\n            end_level,\n            add_extra_convs,\n            relu_before_extra_convs,\n            no_norm_on_lateral,\n            conv_cfg,\n            norm_cfg,\n            act_cfg,\n            init_cfg=init_cfg)\n        # add extra bottom up pathway\n        self.downsample_convs = nn.ModuleList()\n        self.pafpn_convs = nn.ModuleList()\n        for i in range(self.start_level + 1, self.backbone_end_level):\n            d_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                stride=2,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n            pafpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n            self.downsample_convs.append(d_conv)\n            self.pafpn_convs.append(pafpn_conv)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n\n        # build top-down path\n        used_backbone_levels = len(laterals)\n        for i in range(used_backbone_levels - 1, 0, -1):\n            prev_shape = laterals[i - 1].shape[2:]\n            laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                laterals[i], size=prev_shape, mode='nearest')\n\n        # build outputs\n        # part 1: from original levels\n        inter_outs = [\n            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n        ]\n\n        # part 2: add bottom-up path\n        for i in range(0, used_backbone_levels - 1):\n            inter_outs[i + 1] = inter_outs[i + 1] + \\\n                                self.downsample_convs[i](inter_outs[i])\n\n        outs = []\n        outs.append(inter_outs[0])\n        outs.extend([\n            self.pafpn_convs[i - 1](inter_outs[i])\n            for i in range(1, used_backbone_levels)\n        ])\n\n        # part 3: add extra levels\n        if self.num_outs > len(outs):\n            # use max pool to get more levels on top of outputs\n            # (e.g., Faster R-CNN, Mask R-CNN)\n            if not self.add_extra_convs:\n                for i in range(self.num_outs - used_backbone_levels):\n                    
outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n            # add conv layers on top of original feature maps (RetinaNet)\n            else:\n                if self.add_extra_convs == 'on_input':\n                    orig = inputs[self.backbone_end_level - 1]\n                    outs.append(self.fpn_convs[used_backbone_levels](orig))\n                elif self.add_extra_convs == 'on_lateral':\n                    outs.append(self.fpn_convs[used_backbone_levels](\n                        laterals[-1]))\n                elif self.add_extra_convs == 'on_output':\n                    outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))\n                else:\n                    raise NotImplementedError\n                for i in range(used_backbone_levels + 1, self.num_outs):\n                    if self.relu_before_extra_convs:\n                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n                    else:\n                        outs.append(self.fpn_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
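  {
    "path": "examples/pafpn_usage_sketch.py",
    "content": "# Hypothetical usage sketch for PAFPN; this file is not part of the upstream\n# MMDetection source. It shows how the neck defined in\n# mmdet/models/necks/pafpn.py could be built directly and run on dummy\n# feature maps. num_outs=5 adds one extra level on top of the four outputs\n# via max pooling (add_extra_convs is False by default). All shapes are\n# made-up demo values.\nimport torch\n\nfrom mmdet.models.necks.pafpn import PAFPN\n\nif __name__ == '__main__':\n    in_channels = [16, 32, 64, 128]\n    scales = [64, 32, 16, 8]\n    inputs = tuple(\n        torch.rand(1, c, s, s) for c, s in zip(in_channels, scales))\n    neck = PAFPN(in_channels=in_channels, out_channels=32, num_outs=5)\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },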
  {
    "path": "mmdet/models/necks/rfp.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.model import BaseModule, ModuleList, constant_init, xavier_init\n\nfrom mmdet.registry import MODELS\nfrom .fpn import FPN\n\n\nclass ASPP(BaseModule):\n    \"\"\"ASPP (Atrous Spatial Pyramid Pooling)\n\n    This is an implementation of the ASPP module used in DetectoRS\n    (https://arxiv.org/pdf/2006.02334.pdf)\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of channels produced by this module\n        dilations (tuple[int]): Dilations of the four branches.\n            Default: (1, 3, 6, 1)\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 dilations=(1, 3, 6, 1),\n                 init_cfg=dict(type='Kaiming', layer='Conv2d')):\n        super().__init__(init_cfg)\n        assert dilations[-1] == 1\n        self.aspp = nn.ModuleList()\n        for dilation in dilations:\n            kernel_size = 3 if dilation > 1 else 1\n            padding = dilation if dilation > 1 else 0\n            conv = nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size=kernel_size,\n                stride=1,\n                dilation=dilation,\n                padding=padding,\n                bias=True)\n            self.aspp.append(conv)\n        self.gap = nn.AdaptiveAvgPool2d(1)\n\n    def forward(self, x):\n        avg_x = self.gap(x)\n        out = []\n        for aspp_idx in range(len(self.aspp)):\n            inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x\n            out.append(F.relu_(self.aspp[aspp_idx](inp)))\n        out[-1] = out[-1].expand_as(out[-2])\n        out = torch.cat(out, dim=1)\n        return out\n\n\n@MODELS.register_module()\nclass RFP(FPN):\n    \"\"\"RFP (Recursive Feature Pyramid)\n\n    This is an implementation of RFP in `DetectoRS\n    <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the\n    input of RFP should be multi level features along with origin input image\n    of backbone.\n\n    Args:\n        rfp_steps (int): Number of unrolled steps of RFP.\n        rfp_backbone (dict): Configuration of the backbone for RFP.\n        aspp_out_channels (int): Number of output channels of ASPP module.\n        aspp_dilations (tuple[int]): Dilation rates of four branches.\n            Default: (1, 3, 6, 1)\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 rfp_steps,\n                 rfp_backbone,\n                 aspp_out_channels,\n                 aspp_dilations=(1, 3, 6, 1),\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg, **kwargs)\n        self.rfp_steps = rfp_steps\n        # Be careful! 
Pretrained weights cannot be loaded when use\n        # nn.ModuleList\n        self.rfp_modules = ModuleList()\n        for rfp_idx in range(1, rfp_steps):\n            rfp_module = MODELS.build(rfp_backbone)\n            self.rfp_modules.append(rfp_module)\n        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,\n                             aspp_dilations)\n        self.rfp_weight = nn.Conv2d(\n            self.out_channels,\n            1,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            bias=True)\n\n    def init_weights(self):\n        # Avoid using super().init_weights(), which may alter the default\n        # initialization of the modules in self.rfp_modules that have missing\n        # keys in the pretrained checkpoint.\n        for convs in [self.lateral_convs, self.fpn_convs]:\n            for m in convs.modules():\n                if isinstance(m, nn.Conv2d):\n                    xavier_init(m, distribution='uniform')\n        for rfp_idx in range(self.rfp_steps - 1):\n            self.rfp_modules[rfp_idx].init_weights()\n        constant_init(self.rfp_weight, 0)\n\n    def forward(self, inputs):\n        inputs = list(inputs)\n        assert len(inputs) == len(self.in_channels) + 1  # +1 for input image\n        img = inputs.pop(0)\n        # FPN forward\n        x = super().forward(tuple(inputs))\n        for rfp_idx in range(self.rfp_steps - 1):\n            rfp_feats = [x[0]] + list(\n                self.rfp_aspp(x[i]) for i in range(1, len(x)))\n            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)\n            # FPN forward\n            x_idx = super().forward(x_idx)\n            x_new = []\n            for ft_idx in range(len(x_idx)):\n                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))\n                x_new.append(add_weight * x_idx[ft_idx] +\n                             (1 - add_weight) * x[ft_idx])\n            x = x_new\n        return x\n"
  },
  {
    "path": "mmdet/models/necks/ssd_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass SSDNeck(BaseModule):\n    \"\"\"Extra layers of SSD backbone to generate multi-scale feature maps.\n\n    Args:\n        in_channels (Sequence[int]): Number of input channels per scale.\n        out_channels (Sequence[int]): Number of output channels per scale.\n        level_strides (Sequence[int]): Stride of 3x3 conv per level.\n        level_paddings (Sequence[int]): Padding size of 3x3 conv per level.\n        l2_norm_scale (float|None): L2 normalization layer init scale.\n            If None, not use L2 normalization on the first input feature.\n        last_kernel_size (int): Kernel size of the last conv layer.\n            Default: 3.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Default: False.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: None.\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU').\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 level_strides,\n                 level_paddings,\n                 l2_norm_scale=20.,\n                 last_kernel_size=3,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=dict(type='ReLU'),\n                 init_cfg=[\n                     dict(\n                         type='Xavier', distribution='uniform',\n                         layer='Conv2d'),\n                     dict(type='Constant', val=1, layer='BatchNorm2d'),\n                 ]):\n        super(SSDNeck, self).__init__(init_cfg)\n        assert len(out_channels) > len(in_channels)\n        assert len(out_channels) - len(in_channels) == len(level_strides)\n        assert len(level_strides) == len(level_paddings)\n        assert in_channels == out_channels[:len(in_channels)]\n\n        if l2_norm_scale:\n            self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)\n            self.init_cfg += [\n                dict(\n                    type='Constant',\n                    val=self.l2_norm.scale,\n                    override=dict(name='l2_norm'))\n            ]\n\n        self.extra_layers = nn.ModuleList()\n        extra_layer_channels = out_channels[len(in_channels):]\n        second_conv = DepthwiseSeparableConvModule if \\\n            use_depthwise else ConvModule\n\n        for i, (out_channel, stride, padding) in enumerate(\n                zip(extra_layer_channels, level_strides, level_paddings)):\n            kernel_size = last_kernel_size \\\n                if i == len(extra_layer_channels) - 1 else 3\n            per_lvl_convs = nn.Sequential(\n                ConvModule(\n                    out_channels[len(in_channels) - 1 + i],\n                    out_channel // 2,\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg),\n                second_conv(\n                    out_channel // 2,\n                    out_channel,\n                    kernel_size,\n        
            stride=stride,\n                    padding=padding,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.extra_layers.append(per_lvl_convs)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        outs = [feat for feat in inputs]\n        if hasattr(self, 'l2_norm'):\n            outs[0] = self.l2_norm(outs[0])\n\n        feat = outs[-1]\n        for layer in self.extra_layers:\n            feat = layer(feat)\n            outs.append(feat)\n        return tuple(outs)\n\n\nclass L2Norm(nn.Module):\n\n    def __init__(self, n_dims, scale=20., eps=1e-10):\n        \"\"\"L2 normalization layer.\n\n        Args:\n            n_dims (int): Number of dimensions to be normalized\n            scale (float, optional): Defaults to 20..\n            eps (float, optional): Used to avoid division by zero.\n                Defaults to 1e-10.\n        \"\"\"\n        super(L2Norm, self).__init__()\n        self.n_dims = n_dims\n        self.weight = nn.Parameter(torch.Tensor(self.n_dims))\n        self.eps = eps\n        self.scale = scale\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        # normalization layer convert to FP32 in FP16 training\n        x_float = x.float()\n        norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps\n        return (self.weight[None, :, None, None].float().expand_as(x_float) *\n                x_float / norm).type_as(x)\n"
  },
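  {
    "path": "examples/ssd_neck_usage_sketch.py",
    "content": "# Hypothetical usage sketch for SSDNeck; this file is not part of the\n# upstream MMDetection source. It shows how the neck defined in\n# mmdet/models/necks/ssd_neck.py could be built directly and run on dummy\n# VGG-style features. The channel/stride/padding values mirror a typical\n# SSD300-style setup and are assumptions for the demo only.\nimport torch\n\nfrom mmdet.models.necks.ssd_neck import SSDNeck\n\nif __name__ == '__main__':\n    neck = SSDNeck(\n        in_channels=(512, 1024),\n        out_channels=(512, 1024, 512, 256, 256, 256),\n        level_strides=(2, 2, 1, 1),\n        level_paddings=(1, 1, 0, 0),\n        l2_norm_scale=20.)\n    inputs = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },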
  {
    "path": "mmdet/models/necks/ssh.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\n\n\nclass SSHContextModule(BaseModule):\n    \"\"\"This is an implementation of `SSH context module` described in `SSH:\n    Single Stage Headless Face Detector.\n\n    <https://arxiv.org/pdf/1708.03979.pdf>`_.\n\n    Args:\n        in_channels (int): Number of input channels used at each scale.\n        out_channels (int): Number of output channels used at each scale.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. Defaults to dict(type='BN').\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(init_cfg=init_cfg)\n        assert out_channels % 4 == 0\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        self.conv5x5_1 = ConvModule(\n            self.in_channels,\n            self.out_channels // 4,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n        )\n\n        self.conv5x5_2 = ConvModule(\n            self.out_channels // 4,\n            self.out_channels // 4,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.conv7x7_2 = ConvModule(\n            self.out_channels // 4,\n            self.out_channels // 4,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n        )\n\n        self.conv7x7_3 = ConvModule(\n            self.out_channels // 4,\n            self.out_channels // 4,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None,\n        )\n\n    def forward(self, x: torch.Tensor) -> tuple:\n        conv5x5_1 = self.conv5x5_1(x)\n        conv5x5 = self.conv5x5_2(conv5x5_1)\n        conv7x7_2 = self.conv7x7_2(conv5x5_1)\n        conv7x7 = self.conv7x7_3(conv7x7_2)\n\n        return (conv5x5, conv7x7)\n\n\nclass SSHDetModule(BaseModule):\n    \"\"\"This is an implementation of `SSH detection module` described in `SSH:\n    Single Stage Headless Face Detector.\n\n    <https://arxiv.org/pdf/1708.03979.pdf>`_.\n\n    Args:\n        in_channels (int): Number of input channels used at each scale.\n        out_channels (int): Number of output channels used at each scale.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. 
Defaults to dict(type='BN').\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 init_cfg: OptMultiConfig = None):\n        super().__init__(init_cfg=init_cfg)\n        assert out_channels % 4 == 0\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        self.conv3x3 = ConvModule(\n            self.in_channels,\n            self.out_channels // 2,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.context_module = SSHContextModule(\n            in_channels=self.in_channels,\n            out_channels=self.out_channels,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        conv3x3 = self.conv3x3(x)\n        conv5x5, conv7x7 = self.context_module(x)\n        out = torch.cat([conv3x3, conv5x5, conv7x7], dim=1)\n        out = F.relu(out)\n\n        return out\n\n\n@MODELS.register_module()\nclass SSH(BaseModule):\n    \"\"\"`SSH Neck` used in `SSH: Single Stage Headless Face Detector.\n\n    <https://arxiv.org/pdf/1708.03979.pdf>`_.\n\n    Args:\n        num_scales (int): The number of scales / stages.\n        in_channels (list[int]): The number of input channels per scale.\n        out_channels (list[int]): The number of output channels  per scale.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n            convolution layer. Defaults to None.\n        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n            layer. Defaults to dict(type='BN').\n        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n            list[dict], optional): Initialization config dict.\n\n    Example:\n        >>> import torch\n        >>> in_channels = [8, 16, 32, 64]\n        >>> out_channels = [16, 32, 64, 128]\n        >>> scales = [340, 170, 84, 43]\n        >>> inputs = [torch.rand(1, c, s, s)\n        ...           for c, s in zip(in_channels, scales)]\n        >>> self = SSH(num_scales=4, in_channels=in_channels,\n        ...           out_channels=out_channels)\n        >>> outputs = self.forward(inputs)\n        >>> for i in range(len(outputs)):\n        ...     
print(f'outputs[{i}].shape = {outputs[i].shape}')\n        outputs[0].shape = torch.Size([1, 16, 340, 340])\n        outputs[1].shape = torch.Size([1, 32, 170, 170])\n        outputs[2].shape = torch.Size([1, 64, 84, 84])\n        outputs[3].shape = torch.Size([1, 128, 43, 43])\n    \"\"\"\n\n    def __init__(self,\n                 num_scales: int,\n                 in_channels: List[int],\n                 out_channels: List[int],\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 init_cfg: OptMultiConfig = dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super().__init__(init_cfg=init_cfg)\n        assert (num_scales == len(in_channels) == len(out_channels))\n        self.num_scales = num_scales\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        for idx in range(self.num_scales):\n            in_c, out_c = self.in_channels[idx], self.out_channels[idx]\n            self.add_module(\n                f'ssh_module{idx}',\n                SSHDetModule(\n                    in_channels=in_c,\n                    out_channels=out_c,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg))\n\n    def forward(self, inputs: Tuple[torch.Tensor]) -> tuple:\n        assert len(inputs) == self.num_scales\n\n        outs = []\n        for idx, x in enumerate(inputs):\n            ssh_module = getattr(self, f'ssh_module{idx}')\n            out = ssh_module(x)\n            outs.append(out)\n\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/yolo_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\n\n\nclass DetectionBlock(BaseModule):\n    \"\"\"Detection block in YOLO neck.\n\n    Let out_channels = n, the DetectionBlock contains:\n    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.\n    The first 6 ConvLayers are formed the following way:\n        1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.\n    The Conv2D layer is 1x1x255.\n    Some block will have branch after the fifth ConvLayer.\n    The input channel is arbitrary (in_channels)\n\n    Args:\n        in_channels (int): The number of input channels.\n        out_channels (int): The number of output channels.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 act_cfg: ConfigType = dict(\n                     type='LeakyReLU', negative_slope=0.1),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super(DetectionBlock, self).__init__(init_cfg)\n        double_out_channels = out_channels * 2\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n        self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)\n        self.conv2 = ConvModule(\n            out_channels, double_out_channels, 3, padding=1, **cfg)\n        self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)\n        self.conv4 = ConvModule(\n            out_channels, double_out_channels, 3, padding=1, **cfg)\n        self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)\n\n    def forward(self, x: Tensor) -> Tensor:\n        tmp = self.conv1(x)\n        tmp = self.conv2(tmp)\n        tmp = self.conv3(tmp)\n        tmp = self.conv4(tmp)\n        out = self.conv5(tmp)\n        return out\n\n\n@MODELS.register_module()\nclass YOLOV3Neck(BaseModule):\n    \"\"\"The neck of YOLOV3.\n\n    It can be treated as a simplified version of FPN. It\n    will take the result from Darknet backbone and do some upsampling and\n    concatenation. 
It will finally output the detection result.\n\n    Note:\n        The input feats should be ordered from low-lvl to high-lvl.\n            i.e., the last element is the highest-level feature\n        YOLOV3Neck will process them in reversed order.\n            i.e., from bottom (high-lvl) to top (low-lvl)\n\n    Args:\n        num_scales (int): The number of scales / stages.\n        in_channels (List[int]): The number of input channels per scale.\n        out_channels (List[int]): The number of output channels per scale.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None.\n        norm_cfg (dict, optional): Dictionary to construct and config norm\n            layer. Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict, optional): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_scales: int,\n                 in_channels: List[int],\n                 out_channels: List[int],\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),\n                 act_cfg: ConfigType = dict(\n                     type='LeakyReLU', negative_slope=0.1),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super(YOLOV3Neck, self).__init__(init_cfg)\n        assert (num_scales == len(in_channels) == len(out_channels))\n        self.num_scales = num_scales\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        # To support arbitrary scales, the code looks awful, but it works.\n        # Better solution is welcomed.\n        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)\n        for i in range(1, self.num_scales):\n            in_c, out_c = self.in_channels[i], self.out_channels[i]\n            inter_c = out_channels[i - 1]\n            self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))\n            # in_c + out_c : High-lvl feats will be cat with low-lvl feats\n            self.add_module(f'detect{i+1}',\n                            DetectionBlock(in_c + out_c, out_c, **cfg))\n\n    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:\n        assert len(feats) == self.num_scales\n\n        # processed from bottom (high-lvl) to top (low-lvl)\n        outs = []\n        out = self.detect1(feats[-1])\n        outs.append(out)\n\n        for i, x in enumerate(reversed(feats[:-1])):\n            conv = getattr(self, f'conv{i+1}')\n            tmp = conv(out)\n\n            # Cat with low-lvl feats\n            tmp = F.interpolate(tmp, scale_factor=2)\n            tmp = torch.cat((tmp, x), 1)\n\n            detect = getattr(self, f'detect{i+2}')\n            out = detect(tmp)\n            outs.append(out)\n\n        return tuple(outs)\n"
  },
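  {
    "path": "examples/yolo_neck_usage_sketch.py",
    "content": "# Hypothetical usage sketch for YOLOV3Neck; this file is not part of the\n# upstream MMDetection source. It shows how the neck defined in\n# mmdet/models/necks/yolo_neck.py could be built directly and run on dummy\n# Darknet-53-style features. The inputs are ordered from low-level (large,\n# few channels) to high-level (small, many channels); in_channels and\n# out_channels follow the usual YOLOv3 config ordering. All shapes are demo\n# assumptions.\nimport torch\n\nfrom mmdet.models.necks.yolo_neck import YOLOV3Neck\n\nif __name__ == '__main__':\n    neck = YOLOV3Neck(\n        num_scales=3,\n        in_channels=[1024, 512, 256],\n        out_channels=[512, 256, 128])\n    inputs = (torch.rand(1, 256, 52, 52), torch.rand(1, 512, 26, 26),\n              torch.rand(1, 1024, 13, 13))\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },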
  {
    "path": "mmdet/models/necks/yolox_pafpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom ..layers import CSPLayer\n\n\n@MODELS.register_module()\nclass YOLOXPAFPN(BaseModule):\n    \"\"\"Path Aggregation Network used in YOLOX.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3\n        use_depthwise (bool): Whether to depthwise separable convolution in\n            blocks. Default: False\n        upsample_cfg (dict): Config dict for interpolate layer.\n            Default: `dict(scale_factor=2, mode='nearest')`\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN')\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish')\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_csp_blocks=3,\n                 use_depthwise=False,\n                 upsample_cfg=dict(scale_factor=2, mode='nearest'),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=dict(\n                     type='Kaiming',\n                     layer='Conv2d',\n                     a=math.sqrt(5),\n                     distribution='uniform',\n                     mode='fan_in',\n                     nonlinearity='leaky_relu')):\n        super(YOLOXPAFPN, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n\n        # build top-down blocks\n        self.upsample = nn.Upsample(**upsample_cfg)\n        self.reduce_layers = nn.ModuleList()\n        self.top_down_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1, 0, -1):\n            self.reduce_layers.append(\n                ConvModule(\n                    in_channels[idx],\n                    in_channels[idx - 1],\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.top_down_blocks.append(\n                CSPLayer(\n                    in_channels[idx - 1] * 2,\n                    in_channels[idx - 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        # build bottom-up blocks\n        self.downsamples = nn.ModuleList()\n        self.bottom_up_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1):\n            self.downsamples.append(\n                conv(\n                    in_channels[idx],\n                    in_channels[idx],\n                    3,\n                    stride=2,\n                    padding=1,\n      
              conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.bottom_up_blocks.append(\n                CSPLayer(\n                    in_channels[idx] * 2,\n                    in_channels[idx + 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        self.out_convs = nn.ModuleList()\n        for i in range(len(in_channels)):\n            self.out_convs.append(\n                ConvModule(\n                    in_channels[i],\n                    out_channels,\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n    def forward(self, inputs):\n        \"\"\"\n        Args:\n            inputs (tuple[Tensor]): input features.\n\n        Returns:\n            tuple[Tensor]: YOLOXPAFPN features.\n        \"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # top-down path\n        inner_outs = [inputs[-1]]\n        for idx in range(len(self.in_channels) - 1, 0, -1):\n            feat_heigh = inner_outs[0]\n            feat_low = inputs[idx - 1]\n            feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx](\n                feat_heigh)\n            inner_outs[0] = feat_heigh\n\n            upsample_feat = self.upsample(feat_heigh)\n\n            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](\n                torch.cat([upsample_feat, feat_low], 1))\n            inner_outs.insert(0, inner_out)\n\n        # bottom-up path\n        outs = [inner_outs[0]]\n        for idx in range(len(self.in_channels) - 1):\n            feat_low = outs[-1]\n            feat_height = inner_outs[idx + 1]\n            downsample_feat = self.downsamples[idx](feat_low)\n            out = self.bottom_up_blocks[idx](\n                torch.cat([downsample_feat, feat_height], 1))\n            outs.append(out)\n\n        # out convs\n        for idx, conv in enumerate(self.out_convs):\n            outs[idx] = conv(outs[idx])\n\n        return tuple(outs)\n"
  },
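  {
    "path": "examples/yolox_pafpn_usage_sketch.py",
    "content": "# Hypothetical usage sketch for YOLOXPAFPN; this file is not part of the\n# upstream MMDetection source. It shows how the neck defined in\n# mmdet/models/necks/yolox_pafpn.py could be built directly and run on dummy\n# CSPDarknet-style features. num_csp_blocks is reduced to 1 to keep the demo\n# light; all channel counts and spatial sizes are made-up demo values.\nimport torch\n\nfrom mmdet.models.necks.yolox_pafpn import YOLOXPAFPN\n\nif __name__ == '__main__':\n    in_channels = [128, 256, 512]\n    scales = [32, 16, 8]\n    inputs = tuple(\n        torch.rand(1, c, s, s) for c, s in zip(in_channels, scales))\n    neck = YOLOXPAFPN(\n        in_channels=in_channels, out_channels=128, num_csp_blocks=1)\n    outputs = neck(inputs)\n    for i, out in enumerate(outputs):\n        print(f'outputs[{i}].shape = {out.shape}')\n"
  },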
  {
    "path": "mmdet/models/roi_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_roi_head import BaseRoIHead\nfrom .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,\n                         DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,\n                         Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)\nfrom .cascade_roi_head import CascadeRoIHead\nfrom .double_roi_head import DoubleHeadRoIHead\nfrom .dynamic_roi_head import DynamicRoIHead\nfrom .grid_roi_head import GridRoIHead\nfrom .htc_roi_head import HybridTaskCascadeRoIHead\nfrom .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,\n                         FusedSemanticHead, GlobalContextHead, GridHead,\n                         HTCMaskHead, MaskIoUHead, MaskPointHead,\n                         SCNetMaskHead, SCNetSemanticHead)\nfrom .mask_scoring_roi_head import MaskScoringRoIHead\nfrom .multi_instance_roi_head import MultiInstanceRoIHead\nfrom .pisa_roi_head import PISARoIHead\nfrom .point_rend_roi_head import PointRendRoIHead\nfrom .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,\n                             SingleRoIExtractor)\nfrom .scnet_roi_head import SCNetRoIHead\nfrom .shared_heads import ResLayer\nfrom .sparse_roi_head import SparseRoIHead\nfrom .standard_roi_head import StandardRoIHead\nfrom .trident_roi_head import TridentRoIHead\n\n__all__ = [\n    'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',\n    'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',\n    'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',\n    'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',\n    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',\n    'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',\n    'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',\n    'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',\n    'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',\n    'FeatureRelayHead', 'GlobalContextHead', 'MultiInstanceRoIHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/base_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Tuple\n\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList, OptConfigType, OptMultiConfig\n\n\nclass BaseRoIHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for RoIHeads.\"\"\"\n\n    def __init__(self,\n                 bbox_roi_extractor: OptMultiConfig = None,\n                 bbox_head: OptMultiConfig = None,\n                 mask_roi_extractor: OptMultiConfig = None,\n                 mask_head: OptMultiConfig = None,\n                 shared_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if shared_head is not None:\n            self.shared_head = MODELS.build(shared_head)\n\n        if bbox_head is not None:\n            self.init_bbox_head(bbox_roi_extractor, bbox_head)\n\n        if mask_head is not None:\n            self.init_mask_head(mask_roi_extractor, mask_head)\n\n        self.init_assigner_sampler()\n\n    @property\n    def with_bbox(self) -> bool:\n        \"\"\"bool: whether the RoI head contains a `bbox_head`\"\"\"\n        return hasattr(self, 'bbox_head') and self.bbox_head is not None\n\n    @property\n    def with_mask(self) -> bool:\n        \"\"\"bool: whether the RoI head contains a `mask_head`\"\"\"\n        return hasattr(self, 'mask_head') and self.mask_head is not None\n\n    @property\n    def with_shared_head(self) -> bool:\n        \"\"\"bool: whether the RoI head contains a `shared_head`\"\"\"\n        return hasattr(self, 'shared_head') and self.shared_head is not None\n\n    @abstractmethod\n    def init_bbox_head(self, *args, **kwargs):\n        \"\"\"Initialize ``bbox_head``\"\"\"\n        pass\n\n    @abstractmethod\n    def init_mask_head(self, *args, **kwargs):\n        \"\"\"Initialize ``mask_head``\"\"\"\n        pass\n\n    @abstractmethod\n    def init_assigner_sampler(self, *args, **kwargs):\n        \"\"\"Initialize assigner and sampler.\"\"\"\n        pass\n\n    @abstractmethod\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList):\n        \"\"\"Perform forward propagation and loss calculation of the roi head on\n        the features of the upstream network.\"\"\"\n\n    def predict(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the roi head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (N, C, H, W).\n            rpn_results_list (list[:obj:`InstanceData`]): list of region\n                proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results to\n                the original image. 
Defaults to True.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        # TODO: nms_op in mmcv need be enhanced, the bbox result may get\n        #  difference when not rescale in bbox_head\n\n        # If it has the mask branch, the bbox branch does not need\n        # to be scaled to the original image scale, because the mask\n        # branch will scale both bbox and mask at the same time.\n        bbox_rescale = rescale if not self.with_mask else False\n        results_list = self.predict_bbox(\n            x,\n            batch_img_metas,\n            rpn_results_list,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=bbox_rescale)\n\n        if self.with_mask:\n            results_list = self.predict_mask(\n                x, batch_img_metas, results_list, rescale=rescale)\n\n        return results_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bbox_head import BBoxHead\nfrom .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,\n                               Shared4Conv1FCBBoxHead)\nfrom .dii_head import DIIHead\nfrom .double_bbox_head import DoubleConvFCBBoxHead\nfrom .multi_instance_bbox_head import MultiInstanceBBoxHead\nfrom .sabl_head import SABLHead\nfrom .scnet_bbox_head import SCNetBBoxHead\n\n__all__ = [\n    'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',\n    'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',\n    'SCNetBBoxHead', 'MultiInstanceBBoxHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.layers import multiclass_nms\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.utils import empty_instances, multi_apply\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import get_box_tensor, scale_boxes\nfrom mmdet.utils import ConfigType, InstanceList, OptMultiConfig\n\n\n@MODELS.register_module()\nclass BBoxHead(BaseModule):\n    \"\"\"Simplest RoI head, with only two fc layers for classification and\n    regression respectively.\"\"\"\n\n    def __init__(self,\n                 with_avg_pool: bool = False,\n                 with_cls: bool = True,\n                 with_reg: bool = True,\n                 roi_feat_size: int = 7,\n                 in_channels: int = 256,\n                 num_classes: int = 80,\n                 bbox_coder: ConfigType = dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=[0., 0., 0., 0.],\n                     target_stds=[0.1, 0.1, 0.2, 0.2]),\n                 predict_box_type: str = 'hbox',\n                 reg_class_agnostic: bool = False,\n                 reg_decoded_bbox: bool = False,\n                 reg_predictor_cfg: ConfigType = dict(type='Linear'),\n                 cls_predictor_cfg: ConfigType = dict(type='Linear'),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0),\n                 loss_bbox: ConfigType = dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert with_cls or with_reg\n        self.with_avg_pool = with_avg_pool\n        self.with_cls = with_cls\n        self.with_reg = with_reg\n        self.roi_feat_size = _pair(roi_feat_size)\n        self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.predict_box_type = predict_box_type\n        self.reg_class_agnostic = reg_class_agnostic\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.reg_predictor_cfg = reg_predictor_cfg\n        self.cls_predictor_cfg = cls_predictor_cfg\n\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n\n        in_channels = self.in_channels\n        if self.with_avg_pool:\n            self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n        else:\n            in_channels *= self.roi_feat_area\n        if self.with_cls:\n            # need to add background class\n            if self.custom_cls_channels:\n                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n            else:\n                cls_channels = num_classes + 1\n            cls_predictor_cfg_ = self.cls_predictor_cfg.copy()\n            cls_predictor_cfg_.update(\n                in_features=in_channels, 
out_features=cls_channels)\n            self.fc_cls = MODELS.build(cls_predictor_cfg_)\n        if self.with_reg:\n            box_dim = self.bbox_coder.encode_size\n            out_dim_reg = box_dim if reg_class_agnostic else \\\n                box_dim * num_classes\n            reg_predictor_cfg_ = self.reg_predictor_cfg.copy()\n            if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):\n                reg_predictor_cfg_.update(\n                    in_features=in_channels, out_features=out_dim_reg)\n            self.fc_reg = MODELS.build(reg_predictor_cfg_)\n        self.debug_imgs = None\n        if init_cfg is None:\n            self.init_cfg = []\n            if self.with_cls:\n                self.init_cfg += [\n                    dict(\n                        type='Normal', std=0.01, override=dict(name='fc_cls'))\n                ]\n            if self.with_reg:\n                self.init_cfg += [\n                    dict(\n                        type='Normal', std=0.001, override=dict(name='fc_reg'))\n                ]\n\n    # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n    @property\n    def custom_cls_channels(self) -> bool:\n        \"\"\"get custom_cls_channels from loss_cls.\"\"\"\n        return getattr(self.loss_cls, 'custom_cls_channels', False)\n\n    # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n    @property\n    def custom_activation(self) -> bool:\n        \"\"\"get custom_activation from loss_cls.\"\"\"\n        return getattr(self.loss_cls, 'custom_activation', False)\n\n    # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n    @property\n    def custom_accuracy(self) -> bool:\n        \"\"\"get custom_accuracy from loss_cls.\"\"\"\n        return getattr(self.loss_cls, 'custom_accuracy', False)\n\n    def forward(self, x: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_score (Tensor): Classification scores for all\n                  scale levels, each is a 4D-tensor, the channels number\n                  is num_base_priors * num_classes.\n                - bbox_pred (Tensor): Box energies / deltas for all\n                  scale levels, each is a 4D-tensor, the channels number\n                  is num_base_priors * 4.\n        \"\"\"\n        if self.with_avg_pool:\n            if x.numel() > 0:\n                x = self.avg_pool(x)\n                x = x.view(x.size(0), -1)\n            else:\n                # avg_pool does not support empty tensor,\n                # so use torch.mean instead it\n                x = torch.mean(x, dim=(-1, -2))\n        cls_score = self.fc_cls(x) if self.with_cls else None\n        bbox_pred = self.fc_reg(x) if self.with_reg else None\n        return cls_score, bbox_pred\n\n    def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,\n                            pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,\n                            cfg: ConfigDict) -> tuple:\n        \"\"\"Calculate the ground truth for proposals in the single image\n        according to the sampling results.\n\n        Args:\n            pos_priors (Tensor): Contains all the positive boxes,\n                has shape (num_pos, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, 
br_y].\n            neg_priors (Tensor): Contains all the negative boxes,\n                has shape (num_neg, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_bboxes (Tensor): Contains gt_boxes for\n                all positive samples, has shape (num_pos, 4),\n                the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_labels (Tensor): Contains gt_labels for\n                all positive samples, has shape (num_pos, ).\n            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals\n            in a single image. Containing the following Tensors:\n\n                - labels(Tensor): Gt_labels for all proposals, has\n                  shape (num_proposals,).\n                - label_weights(Tensor): Labels_weights for all\n                  proposals, has shape (num_proposals,).\n                - bbox_targets(Tensor):Regression target for all\n                  proposals, has shape (num_proposals, 4), the\n                  last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n                - bbox_weights(Tensor):Regression weights for all\n                  proposals, has shape (num_proposals, 4).\n        \"\"\"\n        num_pos = pos_priors.size(0)\n        num_neg = neg_priors.size(0)\n        num_samples = num_pos + num_neg\n\n        # original implementation uses new_zeros since BG are set to be 0\n        # now use empty & fill because BG cat_id = num_classes,\n        # FG cat_id = [0, num_classes-1]\n        labels = pos_priors.new_full((num_samples, ),\n                                     self.num_classes,\n                                     dtype=torch.long)\n        reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \\\n            else self.bbox_coder.encode_size\n        label_weights = pos_priors.new_zeros(num_samples)\n        bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)\n        bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)\n        if num_pos > 0:\n            labels[:num_pos] = pos_gt_labels\n            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n            label_weights[:num_pos] = pos_weight\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    pos_priors, pos_gt_bboxes)\n            else:\n                # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n                # is applied directly on the decoded bounding boxes, both\n                # the predicted boxes and regression targets should be with\n                # absolute coordinate format.\n                pos_bbox_targets = get_box_tensor(pos_gt_bboxes)\n            bbox_targets[:num_pos, :] = pos_bbox_targets\n            bbox_weights[:num_pos, :] = 1\n        if num_neg > 0:\n            label_weights[-num_neg:] = 1.0\n\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_targets(self,\n                    sampling_results: List[SamplingResult],\n                    rcnn_train_cfg: ConfigDict,\n                    concat: bool = True) -> tuple:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Almost the same as the implementation in bbox_head, we passed\n        additional parameters pos_inds_list and neg_inds_list to\n        `_get_targets_single` function.\n\n        Args:\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following list of Tensors:\n\n            - labels (list[Tensor],Tensor): Gt_labels for all\n                proposals in a batch, each tensor in list has\n                shape (num_proposals,) when `concat=False`, otherwise\n                just a single tensor has shape (num_all_proposals,).\n            - label_weights (list[Tensor]): Labels_weights for\n                all proposals in a batch, each tensor in list has\n                shape (num_proposals,) when `concat=False`, otherwise\n                just a single tensor has shape (num_all_proposals,).\n            - bbox_targets (list[Tensor],Tensor): Regression target\n                for all proposals in a batch, each tensor in list\n                has shape (num_proposals, 4) when `concat=False`,\n                otherwise just a single tensor has shape\n                (num_all_proposals, 4), the last dimension 4 represents\n                [tl_x, tl_y, br_x, br_y].\n            - bbox_weights (list[tensor],Tensor): Regression weights for\n                all proposals in a batch, each tensor in list has shape\n                (num_proposals, 4) when `concat=False`, otherwise just a\n                single tensor has shape (num_all_proposals, 4).\n        \"\"\"\n        pos_priors_list = [res.pos_priors for res in sampling_results]\n        neg_priors_list = [res.neg_priors for res in sampling_results]\n        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n        labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n            self._get_targets_single,\n            pos_priors_list,\n            neg_priors_list,\n            pos_gt_bboxes_list,\n            pos_gt_labels_list,\n            cfg=rcnn_train_cfg)\n\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bbox_targets = torch.cat(bbox_targets, 0)\n            bbox_weights = torch.cat(bbox_weights, 0)\n        return labels, label_weights, 
bbox_targets, bbox_weights\n\n    def loss_and_target(self,\n                        cls_score: Tensor,\n                        bbox_pred: Tensor,\n                        rois: Tensor,\n                        sampling_results: List[SamplingResult],\n                        rcnn_train_cfg: ConfigDict,\n                        concat: bool = True,\n                        reduction_override: Optional[str] = None) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the bbox head.\n\n        Args:\n            cls_score (Tensor): Classification prediction\n                results of all class, has shape\n                (batch_size * num_proposals_single_image, num_classes)\n            bbox_pred (Tensor): Regression prediction results,\n                has shape\n                (batch_size * num_proposals_single_image, 4), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            rois (Tensor): RoIs with the shape\n                (batch_size * num_proposals_single_image, 5) where the first\n                column indicates batch id of each RoI.\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch. Defaults to True.\n            reduction_override (str, optional): The reduction\n                method used to override the original reduction\n                method of the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to None,\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n                The targets are only used for cascade rcnn.\n        \"\"\"\n\n        cls_reg_targets = self.get_targets(\n            sampling_results, rcnn_train_cfg, concat=concat)\n        losses = self.loss(\n            cls_score,\n            bbox_pred,\n            rois,\n            *cls_reg_targets,\n            reduction_override=reduction_override)\n\n        # cls_reg_targets is only for cascade rcnn\n        return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)\n\n    def loss(self,\n             cls_score: Tensor,\n             bbox_pred: Tensor,\n             rois: Tensor,\n             labels: Tensor,\n             label_weights: Tensor,\n             bbox_targets: Tensor,\n             bbox_weights: Tensor,\n             reduction_override: Optional[str] = None) -> dict:\n        \"\"\"Calculate the loss based on the network predictions and targets.\n\n        Args:\n            cls_score (Tensor): Classification prediction\n                results of all class, has shape\n                (batch_size * num_proposals_single_image, num_classes)\n            bbox_pred (Tensor): Regression prediction results,\n                has shape\n                (batch_size * num_proposals_single_image, 4), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            rois (Tensor): RoIs with the shape\n                (batch_size * num_proposals_single_image, 5) where the first\n                column indicates batch id of each RoI.\n            labels (Tensor): Gt_labels for all proposals in a batch, has\n                shape (batch_size * num_proposals_single_image, ).\n            label_weights (Tensor): Labels_weights for all proposals in a\n                batch, has shape (batch_size * 
num_proposals_single_image, ).\n            bbox_targets (Tensor): Regression target for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image, 4),\n                the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            bbox_weights (Tensor): Regression weights for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image, 4).\n            reduction_override (str, optional): The reduction\n                method used to override the original reduction\n                method of the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to None,\n\n        Returns:\n            dict: A dictionary of loss.\n        \"\"\"\n\n        losses = dict()\n\n        if cls_score is not None:\n            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n            if cls_score.numel() > 0:\n                loss_cls_ = self.loss_cls(\n                    cls_score,\n                    labels,\n                    label_weights,\n                    avg_factor=avg_factor,\n                    reduction_override=reduction_override)\n                if isinstance(loss_cls_, dict):\n                    losses.update(loss_cls_)\n                else:\n                    losses['loss_cls'] = loss_cls_\n                if self.custom_activation:\n                    acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n                    losses.update(acc_)\n                else:\n                    losses['acc'] = accuracy(cls_score, labels)\n        if bbox_pred is not None:\n            bg_class_ind = self.num_classes\n            # 0~self.num_classes-1 are FG, self.num_classes is BG\n            pos_inds = (labels >= 0) & (labels < bg_class_ind)\n            # do not perform bounding box regression for BG anymore.\n            if pos_inds.any():\n                if self.reg_decoded_bbox:\n                    # When the regression loss (e.g. 
`IouLoss`,\n                    # `GIouLoss`, `DIouLoss`) is applied directly on\n                    # the decoded bounding boxes, it decodes the\n                    # already encoded coordinates to absolute format.\n                    bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n                    bbox_pred = get_box_tensor(bbox_pred)\n                if self.reg_class_agnostic:\n                    pos_bbox_pred = bbox_pred.view(\n                        bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]\n                else:\n                    pos_bbox_pred = bbox_pred.view(\n                        bbox_pred.size(0), self.num_classes,\n                        -1)[pos_inds.type(torch.bool),\n                            labels[pos_inds.type(torch.bool)]]\n                losses['loss_bbox'] = self.loss_bbox(\n                    pos_bbox_pred,\n                    bbox_targets[pos_inds.type(torch.bool)],\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=bbox_targets.size(0),\n                    reduction_override=reduction_override)\n            else:\n                losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n\n        return losses\n\n    def predict_by_feat(self,\n                        rois: Tuple[Tensor],\n                        cls_scores: Tuple[Tensor],\n                        bbox_preds: Tuple[Tensor],\n                        batch_img_metas: List[dict],\n                        rcnn_test_cfg: Optional[ConfigDict] = None,\n                        rescale: bool = False) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        bbox results.\n\n        Args:\n            rois (tuple[Tensor]): Tuple of boxes to be transformed.\n                Each has shape  (num_boxes, 5). 
last dimension 5 arrange as\n                (batch_index, x1, y1, x2, y2).\n            cls_scores (tuple[Tensor]): Tuple of box scores, each has shape\n                (num_boxes, num_classes + 1).\n            bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each\n                has shape (num_boxes, num_classes * 4).\n            batch_img_metas (list[dict]): List of image information.\n            rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Instance segmentation\n            results of each image after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n        result_list = []\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            results = self._predict_by_feat_single(\n                roi=rois[img_id],\n                cls_score=cls_scores[img_id],\n                bbox_pred=bbox_preds[img_id],\n                img_meta=img_meta,\n                rescale=rescale,\n                rcnn_test_cfg=rcnn_test_cfg)\n            result_list.append(results)\n\n        return result_list\n\n    def _predict_by_feat_single(\n            self,\n            roi: Tensor,\n            cls_score: Tensor,\n            bbox_pred: Tensor,\n            img_meta: dict,\n            rescale: bool = False,\n            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            roi (Tensor): Boxes to be transformed. 
Has shape (num_boxes, 5).\n                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n            cls_score (Tensor): Box scores, has shape\n                (num_boxes, num_classes + 1).\n            bbox_pred (Tensor): Box energies / deltas.\n                has shape (num_boxes, num_classes * 4).\n            img_meta (dict): image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n                Defaults to None\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\\\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        results = InstanceData()\n        if roi.shape[0] == 0:\n            return empty_instances([img_meta],\n                                   roi.device,\n                                   task_type='bbox',\n                                   instance_results=[results],\n                                   box_type=self.predict_box_type,\n                                   use_box_type=False,\n                                   num_classes=self.num_classes,\n                                   score_per_cls=rcnn_test_cfg is None)[0]\n\n        # some loss (Seesaw loss..) may have custom activation\n        if self.custom_cls_channels:\n            scores = self.loss_cls.get_activation(cls_score)\n        else:\n            scores = F.softmax(\n                cls_score, dim=-1) if cls_score is not None else None\n\n        img_shape = img_meta['img_shape']\n        num_rois = roi.size(0)\n        # bbox_pred would be None in some detector when with_reg is False,\n        # e.g. 
Grid R-CNN.\n        if bbox_pred is not None:\n            num_classes = 1 if self.reg_class_agnostic else self.num_classes\n            roi = roi.repeat_interleave(num_classes, dim=0)\n            bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)\n            bboxes = self.bbox_coder.decode(\n                roi[..., 1:], bbox_pred, max_shape=img_shape)\n        else:\n            bboxes = roi[:, 1:].clone()\n            if img_shape is not None and bboxes.size(-1) == 4:\n                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n\n        if rescale and bboxes.size(0) > 0:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            bboxes = scale_boxes(bboxes, scale_factor)\n\n        # Get the inside tensor when `bboxes` is a box type\n        bboxes = get_box_tensor(bboxes)\n        box_dim = bboxes.size(-1)\n        bboxes = bboxes.view(num_rois, -1)\n\n        if rcnn_test_cfg is None:\n            # This means that it is aug test.\n            # It needs to return the raw results without nms.\n            results.bboxes = bboxes\n            results.scores = scores\n        else:\n            det_bboxes, det_labels = multiclass_nms(\n                bboxes,\n                scores,\n                rcnn_test_cfg.score_thr,\n                rcnn_test_cfg.nms,\n                rcnn_test_cfg.max_per_img,\n                box_dim=box_dim)\n            results.bboxes = det_bboxes[:, :-1]\n            results.scores = det_bboxes[:, -1]\n            results.labels = det_labels\n        return results\n\n    def refine_bboxes(self, sampling_results: Union[List[SamplingResult],\n                                                    InstanceList],\n                      bbox_results: dict,\n                      batch_img_metas: List[dict]) -> InstanceList:\n        \"\"\"Refine bboxes during training.\n\n        Args:\n            sampling_results (List[:obj:`SamplingResult`] or\n                List[:obj:`InstanceData`]): Sampling results.\n                :obj:`SamplingResult` is the real sampling results\n                calculate from bbox_head, while :obj:`InstanceData` is\n                fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.\n            bbox_results (dict): Usually is a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n                  column indicates batch id of each RoI.\n                - `bbox_targets` (tuple):  Ground truth for proposals in a\n                  single image. Containing the following list of Tensors:\n                  (labels, label_weights, bbox_targets, bbox_weights)\n            batch_img_metas (List[dict]): List of image information.\n\n        Returns:\n            list[:obj:`InstanceData`]: Refined bboxes of each image.\n\n        Example:\n            >>> # xdoctest: +REQUIRES(module:kwarray)\n            >>> import numpy as np\n            >>> from mmdet.models.task_modules.samplers.\n            ... 
sampling_result import random_boxes\n            >>> from mmdet.models.task_modules.samplers import SamplingResult\n            >>> self = BBoxHead(reg_class_agnostic=True)\n            >>> n_roi = 2\n            >>> n_img = 4\n            >>> scale = 512\n            >>> rng = np.random.RandomState(0)\n            ... batch_img_metas = [{'img_shape': (scale, scale)}\n            >>>                     for _ in range(n_img)]\n            >>> sampling_results = [SamplingResult.random(rng=10)\n            ...                     for _ in range(n_img)]\n            >>> # Create rois in the expected format\n            >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n            >>> img_ids = torch.randint(0, n_img, (n_roi,))\n            >>> img_ids = img_ids.float()\n            >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n            >>> # Create other args\n            >>> labels = torch.randint(0, 81, (scale,)).long()\n            >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n            >>> cls_score = torch.randn((scale, 81))\n            ... # For each image, pretend random positive boxes are gts\n            >>> bbox_targets = (labels, None, None, None)\n            ... bbox_results = dict(rois=rois, bbox_pred=bbox_preds,\n            ...                     cls_score=cls_score,\n            ...                     bbox_targets=bbox_targets)\n            >>> bboxes_list = self.refine_bboxes(sampling_results,\n            ...                                  bbox_results,\n            ...                                  batch_img_metas)\n            >>> print(bboxes_list)\n        \"\"\"\n        pos_is_gts = [res.pos_is_gt for res in sampling_results]\n        # bbox_targets is a tuple\n        labels = bbox_results['bbox_targets'][0]\n        cls_scores = bbox_results['cls_score']\n        rois = bbox_results['rois']\n        bbox_preds = bbox_results['bbox_pred']\n        if self.custom_activation:\n            # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n            cls_scores = self.loss_cls.get_activation(cls_scores)\n        if cls_scores.numel() == 0:\n            return None\n        if cls_scores.shape[-1] == self.num_classes + 1:\n            # remove background class\n            cls_scores = cls_scores[:, :-1]\n        elif cls_scores.shape[-1] != self.num_classes:\n            raise ValueError('The last dim of `cls_scores` should equal to '\n                             '`num_classes` or `num_classes + 1`,'\n                             f'but got {cls_scores.shape[-1]}.')\n        labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),\n                             labels)\n\n        img_ids = rois[:, 0].long().unique(sorted=True)\n        assert img_ids.numel() <= len(batch_img_metas)\n\n        results_list = []\n        for i in range(len(batch_img_metas)):\n            inds = torch.nonzero(\n                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n            num_rois = inds.numel()\n\n            bboxes_ = rois[inds, 1:]\n            label_ = labels[inds]\n            bbox_pred_ = bbox_preds[inds]\n            img_meta_ = batch_img_metas[i]\n            pos_is_gts_ = pos_is_gts[i]\n\n            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n                                           img_meta_)\n            # filter gt bboxes\n            pos_keep = 1 - pos_is_gts_\n            keep_inds = pos_is_gts_.new_ones(num_rois)\n            keep_inds[:len(pos_is_gts_)] = 
pos_keep\n            results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])\n            results_list.append(results)\n\n        return results_list\n\n    def regress_by_class(self, priors: Tensor, label: Tensor,\n                         bbox_pred: Tensor, img_meta: dict) -> Tensor:\n        \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n        Args:\n            priors (Tensor): Priors from `rpn_head` or last stage\n                `bbox_head`, has shape (num_proposals, 4).\n            label (Tensor): Only used when `self.reg_class_agnostic`\n                is False, has shape (num_proposals, ).\n            bbox_pred (Tensor): Regression prediction of\n                current stage `bbox_head`. When `self.reg_class_agnostic`\n                is False, it has shape (n, num_classes * 4), otherwise\n                it has shape (n, 4).\n            img_meta (dict): Image meta info.\n\n        Returns:\n            Tensor: Regressed bboxes, the same shape as input rois.\n        \"\"\"\n        reg_dim = self.bbox_coder.encode_size\n        if not self.reg_class_agnostic:\n            label = label * reg_dim\n            inds = torch.stack([label + i for i in range(reg_dim)], 1)\n            bbox_pred = torch.gather(bbox_pred, 1, inds)\n        assert bbox_pred.size()[1] == reg_dim\n\n        max_shape = img_meta['img_shape']\n        regressed_bboxes = self.bbox_coder.decode(\n            priors, bbox_pred, max_shape=max_shape)\n        return regressed_bboxes\n"
  },
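  {
    "path": "examples/hypothetical_bbox_head_demo.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream repository: it only\n# illustrates the tensor shapes produced by BBoxHead.forward and\n# BBoxHead._get_targets_single, assuming mmdet, mmcv and mmengine are\n# installed. The file name and all inputs below are made up for illustration.\nimport torch\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead\n\nif __name__ == '__main__':\n    # with_avg_pool=True lets raw (num_rois, C, 7, 7) RoI features be fed\n    # directly without flattening them first.\n    head = BBoxHead(\n        with_avg_pool=True, in_channels=256, roi_feat_size=7, num_classes=80)\n    roi_feats = torch.rand(4, 256, 7, 7)  # 4 sampled RoIs\n    cls_score, bbox_pred = head(roi_feats)\n    assert cls_score.shape == (4, 81)      # 80 foreground classes + background\n    assert bbox_pred.shape == (4, 4 * 80)  # class-specific box deltas\n\n    # Target assignment for one image with 2 positive and 3 negative priors.\n    pos_priors = torch.tensor([[10., 10., 50., 50.], [20., 20., 80., 60.]])\n    neg_priors = torch.tensor([[0., 0., 30., 30.], [5., 5., 40., 40.],\n                               [15., 15., 60., 60.]])\n    pos_gt_bboxes = pos_priors + 5.0\n    pos_gt_labels = torch.tensor([3, 7])\n    targets = head._get_targets_single(\n        pos_priors, neg_priors, pos_gt_bboxes, pos_gt_labels,\n        cfg=ConfigDict(pos_weight=-1))\n    labels, label_weights, bbox_targets, bbox_weights = targets\n    # Negative samples get the background label num_classes (= 80).\n    assert labels.tolist()[2:] == [80, 80, 80]\n    assert bbox_targets.shape == (5, 4)\n"
  },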
  {
    "path": "mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .bbox_head import BBoxHead\n\n\n@MODELS.register_module()\nclass ConvFCBBoxHead(BBoxHead):\n    r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n    separated branches.\n\n    .. code-block:: none\n\n                                    /-> cls convs -> cls fcs -> cls\n        shared convs -> shared fcs\n                                    \\-> reg convs -> reg fcs -> reg\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_shared_convs: int = 0,\n                 num_shared_fcs: int = 0,\n                 num_cls_convs: int = 0,\n                 num_cls_fcs: int = 0,\n                 num_reg_convs: int = 0,\n                 num_reg_fcs: int = 0,\n                 conv_out_channels: int = 256,\n                 fc_out_channels: int = 1024,\n                 conv_cfg: Optional[Union[dict, ConfigDict]] = None,\n                 norm_cfg: Optional[Union[dict, ConfigDict]] = None,\n                 init_cfg: Optional[Union[dict, ConfigDict]] = None,\n                 *args,\n                 **kwargs) -> None:\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n        if num_cls_convs > 0 or num_reg_convs > 0:\n            assert num_shared_fcs == 0\n        if not self.with_cls:\n            assert num_cls_convs == 0 and num_cls_fcs == 0\n        if not self.with_reg:\n            assert num_reg_convs == 0 and num_reg_fcs == 0\n        self.num_shared_convs = num_shared_convs\n        self.num_shared_fcs = num_shared_fcs\n        self.num_cls_convs = num_cls_convs\n        self.num_cls_fcs = num_cls_fcs\n        self.num_reg_convs = num_reg_convs\n        self.num_reg_fcs = num_reg_fcs\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        # add shared convs and fcs\n        self.shared_convs, self.shared_fcs, last_layer_dim = \\\n            self._add_conv_fc_branch(\n                self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n                True)\n        self.shared_out_channels = last_layer_dim\n\n        # add cls specific branch\n        self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n            self._add_conv_fc_branch(\n                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n        # add reg specific branch\n        self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n            self._add_conv_fc_branch(\n                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n        if self.num_shared_fcs == 0 and not self.with_avg_pool:\n            if self.num_cls_fcs == 0:\n                self.cls_last_dim *= self.roi_feat_area\n            if self.num_reg_fcs == 0:\n                self.reg_last_dim *= self.roi_feat_area\n\n        self.relu = nn.ReLU(inplace=True)\n        # reconstruct fc_cls and fc_reg since input channels are changed\n        if self.with_cls:\n            if self.custom_cls_channels:\n                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n            else:\n                
cls_channels = self.num_classes + 1\n            cls_predictor_cfg_ = self.cls_predictor_cfg.copy()\n            cls_predictor_cfg_.update(\n                in_features=self.cls_last_dim, out_features=cls_channels)\n            self.fc_cls = MODELS.build(cls_predictor_cfg_)\n        if self.with_reg:\n            box_dim = self.bbox_coder.encode_size\n            out_dim_reg = box_dim if self.reg_class_agnostic else \\\n                box_dim * self.num_classes\n            reg_predictor_cfg_ = self.reg_predictor_cfg.copy()\n            if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):\n                reg_predictor_cfg_.update(\n                    in_features=self.reg_last_dim, out_features=out_dim_reg)\n            self.fc_reg = MODELS.build(reg_predictor_cfg_)\n\n        if init_cfg is None:\n            # when init_cfg is None,\n            # It has been set to\n            # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n            #  [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n            # after `super(ConvFCBBoxHead, self).__init__()`\n            # we only need to append additional configuration\n            # for `shared_fcs`, `cls_fcs` and `reg_fcs`\n            self.init_cfg += [\n                dict(\n                    type='Xavier',\n                    distribution='uniform',\n                    override=[\n                        dict(name='shared_fcs'),\n                        dict(name='cls_fcs'),\n                        dict(name='reg_fcs')\n                    ])\n            ]\n\n    def _add_conv_fc_branch(self,\n                            num_branch_convs: int,\n                            num_branch_fcs: int,\n                            in_channels: int,\n                            is_shared: bool = False) -> tuple:\n        \"\"\"Add shared or separable branch.\n\n        convs -> avg pool (optional) -> fcs\n        \"\"\"\n        last_layer_dim = in_channels\n        # add branch specific conv layers\n        branch_convs = nn.ModuleList()\n        if num_branch_convs > 0:\n            for i in range(num_branch_convs):\n                conv_in_channels = (\n                    last_layer_dim if i == 0 else self.conv_out_channels)\n                branch_convs.append(\n                    ConvModule(\n                        conv_in_channels,\n                        self.conv_out_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n            last_layer_dim = self.conv_out_channels\n        # add branch specific fc layers\n        branch_fcs = nn.ModuleList()\n        if num_branch_fcs > 0:\n            # for shared branch, only consider self.with_avg_pool\n            # for separated branches, also consider self.num_shared_fcs\n            if (is_shared\n                    or self.num_shared_fcs == 0) and not self.with_avg_pool:\n                last_layer_dim *= self.roi_feat_area\n            for i in range(num_branch_fcs):\n                fc_in_channels = (\n                    last_layer_dim if i == 0 else self.fc_out_channels)\n                branch_fcs.append(\n                    nn.Linear(fc_in_channels, self.fc_out_channels))\n            last_layer_dim = self.fc_out_channels\n        return branch_convs, branch_fcs, last_layer_dim\n\n    def forward(self, x: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x 
(tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_score (Tensor): Classification scores for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                    is num_base_priors * num_classes.\n                - bbox_pred (Tensor): Box energies / deltas for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                    is num_base_priors * 4.\n        \"\"\"\n        # shared part\n        if self.num_shared_convs > 0:\n            for conv in self.shared_convs:\n                x = conv(x)\n\n        if self.num_shared_fcs > 0:\n            if self.with_avg_pool:\n                x = self.avg_pool(x)\n\n            x = x.flatten(1)\n\n            for fc in self.shared_fcs:\n                x = self.relu(fc(x))\n        # separate branches\n        x_cls = x\n        x_reg = x\n\n        for conv in self.cls_convs:\n            x_cls = conv(x_cls)\n        if x_cls.dim() > 2:\n            if self.with_avg_pool:\n                x_cls = self.avg_pool(x_cls)\n            x_cls = x_cls.flatten(1)\n        for fc in self.cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n\n        for conv in self.reg_convs:\n            x_reg = conv(x_reg)\n        if x_reg.dim() > 2:\n            if self.with_avg_pool:\n                x_reg = self.avg_pool(x_reg)\n            x_reg = x_reg.flatten(1)\n        for fc in self.reg_fcs:\n            x_reg = self.relu(fc(x_reg))\n\n        cls_score = self.fc_cls(x_cls) if self.with_cls else None\n        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n        return cls_score, bbox_pred\n\n\n@MODELS.register_module()\nclass Shared2FCBBoxHead(ConvFCBBoxHead):\n\n    def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:\n        super().__init__(\n            num_shared_convs=0,\n            num_shared_fcs=2,\n            num_cls_convs=0,\n            num_cls_fcs=0,\n            num_reg_convs=0,\n            num_reg_fcs=0,\n            fc_out_channels=fc_out_channels,\n            *args,\n            **kwargs)\n\n\n@MODELS.register_module()\nclass Shared4Conv1FCBBoxHead(ConvFCBBoxHead):\n\n    def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:\n        super().__init__(\n            num_shared_convs=4,\n            num_shared_fcs=1,\n            num_cls_convs=0,\n            num_cls_fcs=0,\n            num_reg_convs=0,\n            num_reg_fcs=0,\n            fc_out_channels=fc_out_channels,\n            *args,\n            **kwargs)\n"
  },
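  {
    "path": "examples/hypothetical_shared2fc_bbox_head_demo.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream repository: it shows how\n# Shared2FCBBoxHead flattens RoI features through the two shared fcs before the\n# separate classification and regression predictors, assuming mmdet, mmcv and\n# mmengine are installed. The file name and inputs are made up for illustration.\nimport torch\n\nfrom mmdet.models.roi_heads.bbox_heads.convfc_bbox_head import Shared2FCBBoxHead\n\nif __name__ == '__main__':\n    head = Shared2FCBBoxHead(\n        in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80)\n    roi_feats = torch.rand(8, 256, 7, 7)  # 8 sampled RoIs\n    cls_score, bbox_pred = head(roi_feats)\n    # (8, 256, 7, 7) -> flatten -> fc(256 * 49, 1024) -> fc(1024, 1024)\n    assert cls_score.shape == (8, 81)      # 80 classes + background\n    assert bbox_pred.shape == (8, 4 * 80)  # class-specific box deltas\n"
  },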
  {
    "path": "mmdet/models/roi_heads/bbox_heads/dii_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import build_activation_layer, build_norm_layer\nfrom mmcv.cnn.bricks.transformer import FFN, MultiheadAttention\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import bias_init_with_prob\nfrom torch import Tensor\n\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.task_modules import SamplingResult\nfrom mmdet.models.utils import multi_apply\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, reduce_mean\nfrom .bbox_head import BBoxHead\n\n\n@MODELS.register_module()\nclass DIIHead(BBoxHead):\n    r\"\"\"Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object\n    Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\n\n    Args:\n        num_classes (int): Number of class in dataset.\n            Defaults to 80.\n        num_ffn_fcs (int): The number of fully-connected\n            layers in FFNs. Defaults to 2.\n        num_heads (int): The hidden dimension of FFNs.\n            Defaults to 8.\n        num_cls_fcs (int): The number of fully-connected\n            layers in classification subnet. Defaults to 1.\n        num_reg_fcs (int): The number of fully-connected\n            layers in regression subnet. Defaults to 3.\n        feedforward_channels (int): The hidden dimension\n            of FFNs. Defaults to 2048\n        in_channels (int): Hidden_channels of MultiheadAttention.\n            Defaults to 256.\n        dropout (float): Probability of drop the channel.\n            Defaults to 0.0\n        ffn_act_cfg (:obj:`ConfigDict` or dict): The activation config\n            for FFNs.\n        dynamic_conv_cfg (:obj:`ConfigDict` or dict): The convolution\n            config for DynamicConv.\n        loss_iou (:obj:`ConfigDict` or dict): The config for iou or\n            giou loss.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int = 80,\n                 num_ffn_fcs: int = 2,\n                 num_heads: int = 8,\n                 num_cls_fcs: int = 1,\n                 num_reg_fcs: int = 3,\n                 feedforward_channels: int = 2048,\n                 in_channels: int = 256,\n                 dropout: float = 0.0,\n                 ffn_act_cfg: ConfigType = dict(type='ReLU', inplace=True),\n                 dynamic_conv_cfg: ConfigType = dict(\n                     type='DynamicConv',\n                     in_channels=256,\n                     feat_channels=64,\n                     out_channels=256,\n                     input_feat_shape=7,\n                     act_cfg=dict(type='ReLU', inplace=True),\n                     norm_cfg=dict(type='LN')),\n                 loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0),\n                 init_cfg: OptConfigType = None,\n                 **kwargs) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(\n            num_classes=num_classes,\n            reg_decoded_bbox=True,\n            reg_class_agnostic=True,\n            init_cfg=init_cfg,\n            **kwargs)\n        self.loss_iou = MODELS.build(loss_iou)\n        self.in_channels = in_channels\n        self.fp16_enabled = False\n        self.attention = MultiheadAttention(in_channels, num_heads, dropout)\n        self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n        self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg)\n        self.instance_interactive_conv_dropout = nn.Dropout(dropout)\n        self.instance_interactive_conv_norm = build_norm_layer(\n            dict(type='LN'), in_channels)[1]\n\n        self.ffn = FFN(\n            in_channels,\n            feedforward_channels,\n            num_ffn_fcs,\n            act_cfg=ffn_act_cfg,\n            dropout=dropout)\n        self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n        self.cls_fcs = nn.ModuleList()\n        for _ in range(num_cls_fcs):\n            self.cls_fcs.append(\n                nn.Linear(in_channels, in_channels, bias=False))\n            self.cls_fcs.append(\n                build_norm_layer(dict(type='LN'), in_channels)[1])\n            self.cls_fcs.append(\n                build_activation_layer(dict(type='ReLU', inplace=True)))\n\n        # over load the self.fc_cls in BBoxHead\n        if self.loss_cls.use_sigmoid:\n            self.fc_cls = nn.Linear(in_channels, self.num_classes)\n        else:\n            self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)\n\n        self.reg_fcs = nn.ModuleList()\n        for _ in range(num_reg_fcs):\n            self.reg_fcs.append(\n                nn.Linear(in_channels, in_channels, bias=False))\n            self.reg_fcs.append(\n                build_norm_layer(dict(type='LN'), in_channels)[1])\n            self.reg_fcs.append(\n                build_activation_layer(dict(type='ReLU', inplace=True)))\n        # over load the self.fc_cls in BBoxHead\n        self.fc_reg = nn.Linear(in_channels, 4)\n\n        assert self.reg_class_agnostic, 'DIIHead only ' \\\n            'suppport `reg_class_agnostic=True` '\n        assert self.reg_decoded_bbox, 'DIIHead only ' \\\n            'suppport `reg_decoded_bbox=True`'\n\n    def init_weights(self) -> None:\n        \"\"\"Use xavier 
initialization for all weight parameter and set\n        classification head bias as a specific value when use focal loss.\"\"\"\n        super().init_weights()\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n            else:\n                # adopt the default initialization for\n                # the weight and bias of the layer norm\n                pass\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            nn.init.constant_(self.fc_cls.bias, bias_init)\n\n    def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> tuple:\n        \"\"\"Forward function of Dynamic Instance Interactive Head.\n\n        Args:\n            roi_feat (Tensor): Roi-pooling features with shape\n                (batch_size*num_proposals, feature_dimensions,\n                pooling_h , pooling_w).\n            proposal_feat (Tensor): Intermediate feature get from\n                diihead in last stage, has shape\n                (batch_size, num_proposals, feature_dimensions)\n\n        Returns:\n            tuple[Tensor]: Usually a tuple of classification scores\n            and bbox prediction and a intermediate feature.\n\n            - cls_scores (Tensor): Classification scores for\n              all proposals, has shape\n              (batch_size, num_proposals, num_classes).\n            - bbox_preds (Tensor): Box energies / deltas for\n              all proposals, has shape\n              (batch_size, num_proposals, 4).\n            - obj_feat (Tensor): Object feature before classification\n              and regression subnet, has shape\n              (batch_size, num_proposal, feature_dimensions).\n            - attn_feats (Tensor): Intermediate feature.\n        \"\"\"\n        N, num_proposals = proposal_feat.shape[:2]\n\n        # Self attention\n        proposal_feat = proposal_feat.permute(1, 0, 2)\n        proposal_feat = self.attention_norm(self.attention(proposal_feat))\n        attn_feats = proposal_feat.permute(1, 0, 2)\n\n        # instance interactive\n        proposal_feat = attn_feats.reshape(-1, self.in_channels)\n        proposal_feat_iic = self.instance_interactive_conv(\n            proposal_feat, roi_feat)\n        proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(\n            proposal_feat_iic)\n        obj_feat = self.instance_interactive_conv_norm(proposal_feat)\n\n        # FFN\n        obj_feat = self.ffn_norm(self.ffn(obj_feat))\n\n        cls_feat = obj_feat\n        reg_feat = obj_feat\n\n        for cls_layer in self.cls_fcs:\n            cls_feat = cls_layer(cls_feat)\n        for reg_layer in self.reg_fcs:\n            reg_feat = reg_layer(reg_feat)\n\n        cls_score = self.fc_cls(cls_feat).view(\n            N, num_proposals, self.num_classes\n            if self.loss_cls.use_sigmoid else self.num_classes + 1)\n        bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)\n\n        return cls_score, bbox_delta, obj_feat.view(\n            N, num_proposals, self.in_channels), attn_feats\n\n    def loss_and_target(self,\n                        cls_score: Tensor,\n                        bbox_pred: Tensor,\n                        sampling_results: List[SamplingResult],\n                        rcnn_train_cfg: ConfigType,\n                        imgs_whwh: Tensor,\n                        concat: bool = True,\n                        reduction_override: str = None) -> dict:\n        \"\"\"Calculate the loss based on the 
features extracted by the DIIHead.\n\n        Args:\n            cls_score (Tensor): Classification prediction\n                results of all class, has shape\n                (batch_size * num_proposals_single_image, num_classes)\n            bbox_pred (Tensor): Regression prediction results, has shape\n                (batch_size * num_proposals_single_image, 4), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\\\n                shape (batch_size, num_proposals, 4), the last\n                dimension means\n                [img_width,img_height, img_width, img_height].\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch. Defaults to True.\n            reduction_override (str, optional): The reduction\n                method used to override the original reduction\n                method of the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to None.\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n            The targets are only used for cascade rcnn.\n        \"\"\"\n        cls_reg_targets = self.get_targets(\n            sampling_results=sampling_results,\n            rcnn_train_cfg=rcnn_train_cfg,\n            concat=concat)\n        (labels, label_weights, bbox_targets, bbox_weights) = cls_reg_targets\n\n        losses = dict()\n        bg_class_ind = self.num_classes\n        # note in spare rcnn num_gt == num_pos\n        pos_inds = (labels >= 0) & (labels < bg_class_ind)\n        num_pos = pos_inds.sum().float()\n        avg_factor = reduce_mean(num_pos)\n        if cls_score is not None:\n            if cls_score.numel() > 0:\n                losses['loss_cls'] = self.loss_cls(\n                    cls_score,\n                    labels,\n                    label_weights,\n                    avg_factor=avg_factor,\n                    reduction_override=reduction_override)\n                losses['pos_acc'] = accuracy(cls_score[pos_inds],\n                                             labels[pos_inds])\n        if bbox_pred is not None:\n            # 0~self.num_classes-1 are FG, self.num_classes is BG\n            # do not perform bounding box regression for BG anymore.\n            if pos_inds.any():\n                pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),\n                                                  4)[pos_inds.type(torch.bool)]\n                imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),\n                                              4)[pos_inds.type(torch.bool)]\n                losses['loss_bbox'] = self.loss_bbox(\n                    pos_bbox_pred / imgs_whwh,\n                    bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=avg_factor)\n                losses['loss_iou'] = self.loss_iou(\n                    pos_bbox_pred,\n                    bbox_targets[pos_inds.type(torch.bool)],\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=avg_factor)\n            else:\n                losses['loss_bbox'] = bbox_pred.sum() * 0\n                losses['loss_iou'] = bbox_pred.sum() * 0\n        
return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)\n\n    def _get_targets_single(self, pos_inds: Tensor, neg_inds: Tensor,\n                            pos_priors: Tensor, neg_priors: Tensor,\n                            pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,\n                            cfg: ConfigDict) -> tuple:\n        \"\"\"Calculate the ground truth for proposals in the single image\n        according to the sampling results.\n\n        Almost the same as the implementation in `bbox_head`,\n        we add pos_inds and neg_inds to select positive and\n        negative samples instead of selecting the first num_pos\n        as positive samples.\n\n        Args:\n            pos_inds (Tensor): The length is equal to the\n                positive sample numbers contain all index\n                of the positive sample in the origin proposal set.\n            neg_inds (Tensor): The length is equal to the\n                negative sample numbers contain all index\n                of the negative sample in the origin proposal set.\n            pos_priors (Tensor): Contains all the positive boxes,\n                has shape (num_pos, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            neg_priors (Tensor): Contains all the negative boxes,\n                has shape (num_neg, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_bboxes (Tensor): Contains gt_boxes for\n                all positive samples, has shape (num_pos, 4),\n                the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_labels (Tensor): Contains gt_labels for\n                all positive samples, has shape (num_pos, ).\n            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following Tensors:\n\n            - labels(Tensor): Gt_labels for all proposals, has\n              shape (num_proposals,).\n            - label_weights(Tensor): Labels_weights for all proposals, has\n              shape (num_proposals,).\n            - bbox_targets(Tensor):Regression target for all proposals, has\n              shape (num_proposals, 4), the last dimension 4\n              represents [tl_x, tl_y, br_x, br_y].\n            - bbox_weights(Tensor):Regression weights for all proposals,\n              has shape (num_proposals, 4).\n        \"\"\"\n        num_pos = pos_priors.size(0)\n        num_neg = neg_priors.size(0)\n        num_samples = num_pos + num_neg\n\n        # original implementation uses new_zeros since BG are set to be 0\n        # now use empty & fill because BG cat_id = num_classes,\n        # FG cat_id = [0, num_classes-1]\n        labels = pos_priors.new_full((num_samples, ),\n                                     self.num_classes,\n                                     dtype=torch.long)\n        label_weights = pos_priors.new_zeros(num_samples)\n        bbox_targets = pos_priors.new_zeros(num_samples, 4)\n        bbox_weights = pos_priors.new_zeros(num_samples, 4)\n        if num_pos > 0:\n            labels[pos_inds] = pos_gt_labels\n            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n            label_weights[pos_inds] = pos_weight\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    pos_priors, pos_gt_bboxes)\n            else:\n                
pos_bbox_targets = pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1\n        if num_neg > 0:\n            label_weights[neg_inds] = 1.0\n\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_targets(self,\n                    sampling_results: List[SamplingResult],\n                    rcnn_train_cfg: ConfigDict,\n                    concat: bool = True) -> tuple:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Almost the same as the implementation in bbox_head, we passed\n        additional parameters pos_inds_list and neg_inds_list to\n        `_get_targets_single` function.\n\n        Args:\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following list of Tensors:\n\n            - labels (list[Tensor],Tensor): Gt_labels for all\n              proposals in a batch, each tensor in list has\n              shape (num_proposals,) when `concat=False`, otherwise just\n              a single tensor has shape (num_all_proposals,).\n            - label_weights (list[Tensor]): Labels_weights for\n              all proposals in a batch, each tensor in list has shape\n              (num_proposals,) when `concat=False`, otherwise just a\n              single tensor has shape (num_all_proposals,).\n            - bbox_targets (list[Tensor],Tensor): Regression target\n              for all proposals in a batch, each tensor in list has\n              shape (num_proposals, 4) when `concat=False`, otherwise\n              just a single tensor has shape (num_all_proposals, 4),\n              the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            - bbox_weights (list[tensor],Tensor): Regression weights for\n              all proposals in a batch, each tensor in list has shape\n              (num_proposals, 4) when `concat=False`, otherwise just a\n              single tensor has shape (num_all_proposals, 4).\n        \"\"\"\n        pos_inds_list = [res.pos_inds for res in sampling_results]\n        neg_inds_list = [res.neg_inds for res in sampling_results]\n        pos_priors_list = [res.pos_priors for res in sampling_results]\n        neg_priors_list = [res.neg_priors for res in sampling_results]\n        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n        labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n            self._get_targets_single,\n            pos_inds_list,\n            neg_inds_list,\n            pos_priors_list,\n            neg_priors_list,\n            pos_gt_bboxes_list,\n            pos_gt_labels_list,\n            cfg=rcnn_train_cfg)\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bbox_targets = torch.cat(bbox_targets, 0)\n            bbox_weights = torch.cat(bbox_weights, 0)\n        return labels, label_weights, bbox_targets, bbox_weights\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/double_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule, ModuleList\nfrom torch import Tensor\n\nfrom mmdet.models.backbones.resnet import Bottleneck\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, MultiConfig, OptConfigType, OptMultiConfig\nfrom .bbox_head import BBoxHead\n\n\nclass BasicResBlock(BaseModule):\n    \"\"\"Basic residual block.\n\n    This block is a little different from the block in the ResNet backbone.\n    The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.\n\n    Args:\n        in_channels (int): Channels of the input feature map.\n        out_channels (int): Channels of the output feature map.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict\n            for convolution layers.\n        norm_cfg (:obj:`ConfigDict` or dict): The config dict for\n            normalization layers.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. Defaults to None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n\n        # main path\n        self.conv1 = ConvModule(\n            in_channels,\n            in_channels,\n            kernel_size=3,\n            padding=1,\n            bias=False,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg)\n        self.conv2 = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size=1,\n            bias=False,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        # identity path\n        self.conv_identity = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function.\"\"\"\n        identity = x\n\n        x = self.conv1(x)\n        x = self.conv2(x)\n\n        identity = self.conv_identity(identity)\n        out = x + identity\n\n        out = self.relu(out)\n        return out\n\n\n@MODELS.register_module()\nclass DoubleConvFCBBoxHead(BBoxHead):\n    r\"\"\"Bbox head used in Double-Head R-CNN\n\n    .. 
code-block:: none\n\n                                          /-> cls\n                      /-> shared convs ->\n                                          \\-> reg\n        roi features\n                                          /-> cls\n                      \\-> shared fc    ->\n                                          \\-> reg\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_convs: int = 0,\n                 num_fcs: int = 0,\n                 conv_out_channels: int = 1024,\n                 fc_out_channels: int = 1024,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(type='BN'),\n                 init_cfg: MultiConfig = dict(\n                     type='Normal',\n                     override=[\n                         dict(type='Normal', name='fc_cls', std=0.01),\n                         dict(type='Normal', name='fc_reg', std=0.001),\n                         dict(\n                             type='Xavier',\n                             name='fc_branch',\n                             distribution='uniform')\n                     ]),\n                 **kwargs) -> None:\n        kwargs.setdefault('with_avg_pool', True)\n        super().__init__(init_cfg=init_cfg, **kwargs)\n        assert self.with_avg_pool\n        assert num_convs > 0\n        assert num_fcs > 0\n        self.num_convs = num_convs\n        self.num_fcs = num_fcs\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        # increase the channel of input features\n        self.res_block = BasicResBlock(self.in_channels,\n                                       self.conv_out_channels)\n\n        # add conv heads\n        self.conv_branch = self._add_conv_branch()\n        # add fc heads\n        self.fc_branch = self._add_fc_branch()\n\n        out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes\n        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)\n\n        self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)\n        self.relu = nn.ReLU()\n\n    def _add_conv_branch(self) -> None:\n        \"\"\"Add the fc branch which consists of a sequential of conv layers.\"\"\"\n        branch_convs = ModuleList()\n        for i in range(self.num_convs):\n            branch_convs.append(\n                Bottleneck(\n                    inplanes=self.conv_out_channels,\n                    planes=self.conv_out_channels // 4,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        return branch_convs\n\n    def _add_fc_branch(self) -> None:\n        \"\"\"Add the fc branch which consists of a sequential of fc layers.\"\"\"\n        branch_fcs = ModuleList()\n        for i in range(self.num_fcs):\n            fc_in_channels = (\n                self.in_channels *\n                self.roi_feat_area if i == 0 else self.fc_out_channels)\n            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))\n        return branch_fcs\n\n    def forward(self, x_cls: Tensor, x_reg: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x_cls (Tensor): Classification features of rois\n            x_reg (Tensor): Regression features from the upstream network.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_score 
(Tensor): Classification score predictions of rois.\n                  each roi predicts num_classes + 1 channels.\n                - bbox_pred (Tensor): BBox deltas predictions of rois. each roi\n                  predicts 4 * num_classes channels.\n        \"\"\"\n        # conv head\n        x_conv = self.res_block(x_reg)\n\n        for conv in self.conv_branch:\n            x_conv = conv(x_conv)\n\n        if self.with_avg_pool:\n            x_conv = self.avg_pool(x_conv)\n\n        x_conv = x_conv.view(x_conv.size(0), -1)\n        bbox_pred = self.fc_reg(x_conv)\n\n        # fc head\n        x_fc = x_cls.view(x_cls.size(0), -1)\n        for fc in self.fc_branch:\n            x_fc = self.relu(fc(x_fc))\n\n        cls_score = self.fc_cls(x_fc)\n\n        return cls_score, bbox_pred\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor, nn\n\nfrom mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.utils import empty_instances\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox_overlaps\n\n\n@MODELS.register_module()\nclass MultiInstanceBBoxHead(BBoxHead):\n    r\"\"\"Bbox head used in CrowdDet.\n\n    .. code-block:: none\n\n                                      /-> cls convs_1 -> cls fcs_1 -> cls_1\n                                   |--\n                                   |  \\-> reg convs_1 -> reg fcs_1 -> reg_1\n                                   |\n                                   |  /-> cls convs_2 -> cls fcs_2 -> cls_2\n        shared convs -> shared fcs |--\n                                   |  \\-> reg convs_2 -> reg fcs_2 -> reg_2\n                                   |\n                                   |                     ...\n                                   |\n                                   |  /-> cls convs_k -> cls fcs_k -> cls_k\n                                   |--\n                                      \\-> reg convs_k -> reg fcs_k -> reg_k\n\n\n    Args:\n        num_instance (int): The number of branches after shared fcs.\n            Defaults to 2.\n        with_refine (bool): Whether to use refine module. Defaults to False.\n        num_shared_convs (int): The number of shared convs. Defaults to 0.\n        num_shared_fcs (int): The number of shared fcs. Defaults to 2.\n        num_cls_convs (int): The number of cls convs. Defaults to 0.\n        num_cls_fcs (int): The number of cls fcs. Defaults to 0.\n        num_reg_convs (int): The number of reg convs. Defaults to 0.\n        num_reg_fcs (int): The number of reg fcs. Defaults to 0.\n        conv_out_channels (int): The number of conv out channels.\n            Defaults to 256.\n        fc_out_channels (int): The number of fc out channels. 
Defaults to 1024.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_instance: int = 2,\n                 with_refine: bool = False,\n                 num_shared_convs: int = 0,\n                 num_shared_fcs: int = 2,\n                 num_cls_convs: int = 0,\n                 num_cls_fcs: int = 0,\n                 num_reg_convs: int = 0,\n                 num_reg_fcs: int = 0,\n                 conv_out_channels: int = 256,\n                 fc_out_channels: int = 1024,\n                 init_cfg: Optional[Union[dict, ConfigDict]] = None,\n                 *args,\n                 **kwargs) -> None:\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n        assert num_instance == 2, 'Currently only 2 instances are supported'\n        if num_cls_convs > 0 or num_reg_convs > 0:\n            assert num_shared_fcs == 0\n        if not self.with_cls:\n            assert num_cls_convs == 0 and num_cls_fcs == 0\n        if not self.with_reg:\n            assert num_reg_convs == 0 and num_reg_fcs == 0\n        self.num_instance = num_instance\n        self.num_shared_convs = num_shared_convs\n        self.num_shared_fcs = num_shared_fcs\n        self.num_cls_convs = num_cls_convs\n        self.num_cls_fcs = num_cls_fcs\n        self.num_reg_convs = num_reg_convs\n        self.num_reg_fcs = num_reg_fcs\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.with_refine = with_refine\n\n        # add shared convs and fcs\n        self.shared_convs, self.shared_fcs, last_layer_dim = \\\n            self._add_conv_fc_branch(\n                self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n                True)\n        self.shared_out_channels = last_layer_dim\n        self.relu = nn.ReLU(inplace=True)\n\n        if self.with_refine:\n            refine_model_cfg = {\n                'type': 'Linear',\n                'in_features': self.shared_out_channels + 20,\n                'out_features': self.shared_out_channels\n            }\n            self.shared_fcs_ref = MODELS.build(refine_model_cfg)\n            self.fc_cls_ref = nn.ModuleList()\n            self.fc_reg_ref = nn.ModuleList()\n\n        self.cls_convs = nn.ModuleList()\n        self.cls_fcs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        self.reg_fcs = nn.ModuleList()\n        self.cls_last_dim = list()\n        self.reg_last_dim = list()\n        self.fc_cls = nn.ModuleList()\n        self.fc_reg = nn.ModuleList()\n        for k in range(self.num_instance):\n            # add cls specific branch\n            cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch(\n                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n            self.cls_convs.append(cls_convs)\n            self.cls_fcs.append(cls_fcs)\n            self.cls_last_dim.append(cls_last_dim)\n\n            # add reg specific branch\n            reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch(\n                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n            self.reg_convs.append(reg_convs)\n            self.reg_fcs.append(reg_fcs)\n            self.reg_last_dim.append(reg_last_dim)\n\n            if self.num_shared_fcs == 0 and not 
self.with_avg_pool:\n                if self.num_cls_fcs == 0:\n                    self.cls_last_dim *= self.roi_feat_area\n                if self.num_reg_fcs == 0:\n                    self.reg_last_dim *= self.roi_feat_area\n\n            if self.with_cls:\n                if self.custom_cls_channels:\n                    cls_channels = self.loss_cls.get_cls_channels(\n                        self.num_classes)\n                else:\n                    cls_channels = self.num_classes + 1\n                cls_predictor_cfg_ = self.cls_predictor_cfg.copy()  # deepcopy\n                cls_predictor_cfg_.update(\n                    in_features=self.cls_last_dim[k],\n                    out_features=cls_channels)\n                self.fc_cls.append(MODELS.build(cls_predictor_cfg_))\n                if self.with_refine:\n                    self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_))\n\n            if self.with_reg:\n                out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n                               self.num_classes)\n                reg_predictor_cfg_ = self.reg_predictor_cfg.copy()\n                reg_predictor_cfg_.update(\n                    in_features=self.reg_last_dim[k], out_features=out_dim_reg)\n                self.fc_reg.append(MODELS.build(reg_predictor_cfg_))\n                if self.with_refine:\n                    self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_))\n\n        if init_cfg is None:\n            # when init_cfg is None,\n            # It has been set to\n            # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n            #  [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n            # after `super(ConvFCBBoxHead, self).__init__()`\n            # we only need to append additional configuration\n            # for `shared_fcs`, `cls_fcs` and `reg_fcs`\n            self.init_cfg += [\n                dict(\n                    type='Xavier',\n                    distribution='uniform',\n                    override=[\n                        dict(name='shared_fcs'),\n                        dict(name='cls_fcs'),\n                        dict(name='reg_fcs')\n                    ])\n            ]\n\n    def _add_conv_fc_branch(self,\n                            num_branch_convs: int,\n                            num_branch_fcs: int,\n                            in_channels: int,\n                            is_shared: bool = False) -> tuple:\n        \"\"\"Add shared or separable branch.\n\n        convs -> avg pool (optional) -> fcs\n        \"\"\"\n        last_layer_dim = in_channels\n        # add branch specific conv layers\n        branch_convs = nn.ModuleList()\n        if num_branch_convs > 0:\n            for i in range(num_branch_convs):\n                conv_in_channels = (\n                    last_layer_dim if i == 0 else self.conv_out_channels)\n                branch_convs.append(\n                    ConvModule(\n                        conv_in_channels, self.conv_out_channels, 3,\n                        padding=1))\n            last_layer_dim = self.conv_out_channels\n        # add branch specific fc layers\n        branch_fcs = nn.ModuleList()\n        if num_branch_fcs > 0:\n            # for shared branch, only consider self.with_avg_pool\n            # for separated branches, also consider self.num_shared_fcs\n            if (is_shared\n                    or self.num_shared_fcs == 0) and not self.with_avg_pool:\n                last_layer_dim *= self.roi_feat_area\n 
           for i in range(num_branch_fcs):\n                fc_in_channels = (\n                    last_layer_dim if i == 0 else self.fc_out_channels)\n                branch_fcs.append(\n                    nn.Linear(fc_in_channels, self.fc_out_channels))\n            last_layer_dim = self.fc_out_channels\n        return branch_convs, branch_fcs, last_layer_dim\n\n    def forward(self, x: Tuple[Tensor]) -> tuple:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_score (Tensor): Classification scores for all scale\n                  levels, each is a 4D-tensor, the channels number is\n                  num_base_priors * num_classes.\n                - bbox_pred (Tensor): Box energies / deltas for all scale\n                  levels, each is a 4D-tensor, the channels number is\n                  num_base_priors * 4.\n                - cls_score_ref (Tensor): The cls_score after refine model.\n                - bbox_pred_ref (Tensor): The bbox_pred after refine model.\n        \"\"\"\n        # shared part\n        if self.num_shared_convs > 0:\n            for conv in self.shared_convs:\n                x = conv(x)\n\n        if self.num_shared_fcs > 0:\n            if self.with_avg_pool:\n                x = self.avg_pool(x)\n\n            x = x.flatten(1)\n            for fc in self.shared_fcs:\n                x = self.relu(fc(x))\n\n        x_cls = x\n        x_reg = x\n        # separate branches\n        cls_score = list()\n        bbox_pred = list()\n        for k in range(self.num_instance):\n            for conv in self.cls_convs[k]:\n                x_cls = conv(x_cls)\n            if x_cls.dim() > 2:\n                if self.with_avg_pool:\n                    x_cls = self.avg_pool(x_cls)\n                x_cls = x_cls.flatten(1)\n            for fc in self.cls_fcs[k]:\n                x_cls = self.relu(fc(x_cls))\n\n            for conv in self.reg_convs[k]:\n                x_reg = conv(x_reg)\n            if x_reg.dim() > 2:\n                if self.with_avg_pool:\n                    x_reg = self.avg_pool(x_reg)\n                x_reg = x_reg.flatten(1)\n            for fc in self.reg_fcs[k]:\n                x_reg = self.relu(fc(x_reg))\n\n            cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None)\n            bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None)\n\n        if self.with_refine:\n            x_ref = x\n            cls_score_ref = list()\n            bbox_pred_ref = list()\n            for k in range(self.num_instance):\n                feat_ref = cls_score[k].softmax(dim=-1)\n                feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]),\n                                     dim=1).repeat(1, 4)\n                feat_ref = torch.cat((x_ref, feat_ref), dim=1)\n                feat_ref = F.relu_(self.shared_fcs_ref(feat_ref))\n\n                cls_score_ref.append(self.fc_cls_ref[k](feat_ref))\n                bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref))\n\n            cls_score = torch.cat(cls_score, dim=1)\n            bbox_pred = torch.cat(bbox_pred, dim=1)\n            cls_score_ref = torch.cat(cls_score_ref, dim=1)\n            bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1)\n            return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref\n\n        
cls_score = torch.cat(cls_score, dim=1)\n        bbox_pred = torch.cat(bbox_pred, dim=1)\n\n        return cls_score, bbox_pred\n\n    def get_targets(self,\n                    sampling_results: List[SamplingResult],\n                    rcnn_train_cfg: ConfigDict,\n                    concat: bool = True) -> tuple:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Almost the same as the implementation in bbox_head, we passed\n        additional parameters pos_inds_list and neg_inds_list to\n        `_get_targets_single` function.\n\n        Args:\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following list of Tensors:\n\n            - labels (list[Tensor],Tensor): Gt_labels for all proposals in a\n              batch, each tensor in list has shape (num_proposals,) when\n              `concat=False`, otherwise just a single tensor has shape\n              (num_all_proposals,).\n            - label_weights (list[Tensor]): Labels_weights for\n              all proposals in a batch, each tensor in list has shape\n              (num_proposals,) when `concat=False`, otherwise just a single\n              tensor has shape (num_all_proposals,).\n            - bbox_targets (list[Tensor],Tensor): Regression target for all\n              proposals in a batch, each tensor in list has shape\n              (num_proposals, 4) when `concat=False`, otherwise just a single\n              tensor has shape (num_all_proposals, 4), the last dimension 4\n              represents [tl_x, tl_y, br_x, br_y].\n            - bbox_weights (list[tensor],Tensor): Regression weights for\n              all proposals in a batch, each tensor in list has shape\n              (num_proposals, 4) when `concat=False`, otherwise just a\n              single tensor has shape (num_all_proposals, 4).\n        \"\"\"\n        labels = []\n        bbox_targets = []\n        bbox_weights = []\n        label_weights = []\n        for i in range(len(sampling_results)):\n            sample_bboxes = torch.cat([\n                sampling_results[i].pos_gt_bboxes,\n                sampling_results[i].neg_gt_bboxes\n            ])\n            sample_priors = sampling_results[i].priors\n            sample_priors = sample_priors.repeat(1, self.num_instance).reshape(\n                -1, 4)\n            sample_bboxes = sample_bboxes.reshape(-1, 4)\n\n            if not self.reg_decoded_bbox:\n                _bbox_targets = self.bbox_coder.encode(sample_priors,\n                                                       sample_bboxes)\n            else:\n                _bbox_targets = sample_priors\n            _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4)\n            _bbox_weights = torch.ones(_bbox_targets.shape)\n            _labels = torch.cat([\n                sampling_results[i].pos_gt_labels,\n                sampling_results[i].neg_gt_labels\n            ])\n            _labels_weights = torch.ones(_labels.shape)\n\n            bbox_targets.append(_bbox_targets)\n            bbox_weights.append(_bbox_weights)\n            labels.append(_labels)\n            
label_weights.append(_labels_weights)\n\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bbox_targets = torch.cat(bbox_targets, 0)\n            bbox_weights = torch.cat(bbox_weights, 0)\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor,\n             labels: Tensor, label_weights: Tensor, bbox_targets: Tensor,\n             bbox_weights: Tensor, **kwargs) -> dict:\n        \"\"\"Calculate the loss based on the network predictions and targets.\n\n        Args:\n            cls_score (Tensor): Classification prediction results of all class,\n                has shape (batch_size * num_proposals_single_image,\n                (num_classes + 1) * k), k represents the number of prediction\n                boxes generated by each proposal box.\n            bbox_pred (Tensor): Regression prediction results, has shape\n                (batch_size * num_proposals_single_image, 4 * k), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            rois (Tensor): RoIs with the shape\n                (batch_size * num_proposals_single_image, 5) where the first\n                column indicates batch id of each RoI.\n            labels (Tensor): Gt_labels for all proposals in a batch, has\n                shape (batch_size * num_proposals_single_image, k).\n            label_weights (Tensor): Labels_weights for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image, k).\n            bbox_targets (Tensor): Regression target for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image,\n                4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,\n                br_y].\n            bbox_weights (Tensor): Regression weights for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image,\n                4 * k).\n\n        Returns:\n            dict: A dictionary of loss.\n        \"\"\"\n        losses = dict()\n        if bbox_pred.numel():\n            loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2],\n                                   bbox_pred[:, 4:8], cls_score[:, 2:4],\n                                   bbox_targets, labels)\n            loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4],\n                                   bbox_pred[:, 0:4], cls_score[:, 0:2],\n                                   bbox_targets, labels)\n            loss = torch.cat([loss_0, loss_1], dim=1)\n            _, min_indices = loss.min(dim=1)\n            loss_emd = loss[torch.arange(loss.shape[0]), min_indices]\n            loss_emd = loss_emd.mean()\n        else:\n            loss_emd = bbox_pred.sum()\n        losses['loss_rcnn_emd'] = loss_emd\n        return losses\n\n    def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor,\n                 bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor,\n                 labels: Tensor) -> Tensor:\n        \"\"\"Calculate the emd loss.\n\n        Note:\n            This implementation is modified from https://github.com/Purkialo/\n            CrowdDet/blob/master/lib/det_oprs/loss_opr.py\n\n        Args:\n            bbox_pred_0 (Tensor): Part of regression prediction results, has\n                shape (batch_size * num_proposals_single_image, 4), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n   
         cls_score_0 (Tensor): Part of classification prediction results,\n                has shape (batch_size * num_proposals_single_image,\n                (num_classes + 1)), where 1 represents the background.\n            bbox_pred_1 (Tensor): The other part of regression prediction\n                results, has shape (batch_size*num_proposals_single_image, 4).\n            cls_score_1 (Tensor):The other part of classification prediction\n                results, has shape (batch_size * num_proposals_single_image,\n                (num_classes + 1)).\n            targets (Tensor):Regression target for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image,\n                4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,\n                br_y], k represents the number of prediction boxes generated\n                by each proposal box.\n            labels (Tensor): Gt_labels for all proposals in a batch, has\n                shape (batch_size * num_proposals_single_image, k).\n\n        Returns:\n            torch.Tensor: The calculated loss.\n        \"\"\"\n\n        bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1],\n                              dim=1).reshape(-1, bbox_pred_0.shape[-1])\n        cls_score = torch.cat([cls_score_0, cls_score_1],\n                              dim=1).reshape(-1, cls_score_0.shape[-1])\n        targets = targets.reshape(-1, 4)\n        labels = labels.long().flatten()\n\n        # masks\n        valid_masks = labels >= 0\n        fg_masks = labels > 0\n\n        # multiple class\n        bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4)\n        fg_gt_classes = labels[fg_masks]\n        bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :]\n\n        # loss for regression\n        loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks])\n        loss_bbox = loss_bbox.sum(dim=1)\n\n        # loss for classification\n        labels = labels * valid_masks\n        loss_cls = self.loss_cls(cls_score, labels)\n\n        loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox\n        loss = loss_cls.reshape(-1, 2).sum(dim=1)\n        return loss.reshape(-1, 1)\n\n    def _predict_by_feat_single(\n            self,\n            roi: Tensor,\n            cls_score: Tensor,\n            bbox_pred: Tensor,\n            img_meta: dict,\n            rescale: bool = False,\n            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).\n                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n            cls_score (Tensor): Box scores, has shape\n                (num_boxes, num_classes + 1).\n            bbox_pred (Tensor): Box energies / deltas. 
has shape\n                (num_boxes, num_classes * 4).\n            img_meta (dict): image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n                Defaults to None\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n\n        cls_score = cls_score.reshape(-1, self.num_classes + 1)\n        bbox_pred = bbox_pred.reshape(-1, 4)\n        roi = roi.repeat_interleave(self.num_instance, dim=0)\n\n        results = InstanceData()\n        if roi.shape[0] == 0:\n            return empty_instances([img_meta],\n                                   roi.device,\n                                   task_type='bbox',\n                                   instance_results=[results])[0]\n\n        scores = cls_score.softmax(dim=-1) if cls_score is not None else None\n        img_shape = img_meta['img_shape']\n        bboxes = self.bbox_coder.decode(\n            roi[..., 1:], bbox_pred, max_shape=img_shape)\n\n        if rescale and bboxes.size(0) > 0:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n            bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(\n                bboxes.size()[0], -1)\n\n        if rcnn_test_cfg is None:\n            # This means that it is aug test.\n            # It needs to return the raw results without nms.\n            results.bboxes = bboxes\n            results.scores = scores\n        else:\n            roi_idx = np.tile(\n                np.arange(bboxes.shape[0] / self.num_instance)[:, None],\n                (1, self.num_instance)).reshape(-1, 1)[:, 0]\n            roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape(\n                -1, 1)\n            bboxes = torch.cat([bboxes, roi_idx], dim=1)\n            det_bboxes, det_scores = self.set_nms(\n                bboxes, scores[:, 1], rcnn_test_cfg.score_thr,\n                rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img)\n\n            results.bboxes = det_bboxes[:, :-1]\n            results.scores = det_scores\n            results.labels = torch.zeros_like(det_scores)\n\n        return results\n\n    @staticmethod\n    def set_nms(bboxes: Tensor,\n                scores: Tensor,\n                score_thr: float,\n                iou_threshold: float,\n                max_num: int = -1) -> Tuple[Tensor, Tensor]:\n        \"\"\"NMS for multi-instance prediction. 
Please refer to\n        https://github.com/Purkialo/CrowdDet for more details.\n\n        Args:\n            bboxes (Tensor): predict bboxes.\n            scores (Tensor): The score of each predict bbox.\n            score_thr (float): bbox threshold, bboxes with scores lower than it\n                will not be considered.\n            iou_threshold (float): IoU threshold to be considered as\n                conflicted.\n            max_num (int, optional): if there are more than max_num bboxes\n                after NMS, only top max_num will be kept. Default to -1.\n\n        Returns:\n            Tuple[Tensor, Tensor]: (bboxes, scores).\n        \"\"\"\n\n        bboxes = bboxes[scores > score_thr]\n        scores = scores[scores > score_thr]\n\n        ordered_scores, order = scores.sort(descending=True)\n        ordered_bboxes = bboxes[order]\n        roi_idx = ordered_bboxes[:, -1]\n\n        keep = torch.ones(len(ordered_bboxes)) == 1\n        ruler = torch.arange(len(ordered_bboxes))\n        while ruler.shape[0] > 0:\n            basement = ruler[0]\n            ruler = ruler[1:]\n            idx = roi_idx[basement]\n            # calculate the body overlap\n            basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4)\n            ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)\n            overlap = bbox_overlaps(basement_bbox, ruler_bbox)\n            indices = torch.where(overlap > iou_threshold)[1]\n            loc = torch.where(roi_idx[ruler][indices] == idx)\n            # the mask won't change in the step\n            mask = keep[ruler[indices][loc]]\n            keep[ruler[indices]] = False\n            keep[ruler[indices][loc][mask]] = True\n            ruler[~keep[ruler]] = -1\n            ruler = ruler[ruler > 0]\n\n        keep = keep[order.sort()[1]]\n        return bboxes[keep][:max_num, :], scores[keep][:max_num]\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/sabl_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.layers import multiclass_nms\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.task_modules import SamplingResult\nfrom mmdet.models.utils import multi_apply\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig\nfrom .bbox_head import BBoxHead\n\n\n@MODELS.register_module()\nclass SABLHead(BBoxHead):\n    \"\"\"Side-Aware Boundary Localization (SABL) for RoI-Head.\n\n    Side-Aware features are extracted by conv layers\n    with an attention mechanism.\n    Boundary Localization with Bucketing and Bucketing Guided Rescoring\n    are implemented in BucketingBBoxCoder.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        cls_in_channels (int): Input channels of cls RoI feature. \\\n            Defaults to 256.\n        reg_in_channels (int): Input channels of reg RoI feature. \\\n            Defaults to 256.\n        roi_feat_size (int): Size of RoI features. Defaults to 7.\n        reg_feat_up_ratio (int): Upsample ratio of reg features. \\\n            Defaults to 2.\n        reg_pre_kernel (int): Kernel of 2D conv layers before \\\n            attention pooling. Defaults to 3.\n        reg_post_kernel (int): Kernel of 1D conv layers after \\\n            attention pooling. Defaults to 3.\n        reg_pre_num (int): Number of pre convs. Defaults to 2.\n        reg_post_num (int): Number of post convs. Defaults to 1.\n        num_classes (int): Number of classes in dataset. Defaults to 80.\n        cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.\n        reg_offset_out_channels (int): Hidden and output channel \\\n            of reg offset branch. Defaults to 256.\n        reg_cls_out_channels (int): Hidden and output channel \\\n            of reg cls branch. Defaults to 256.\n        num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.\n        num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.\n        reg_class_agnostic (bool): Class agnostic regression or not. \\\n            Defaults to True.\n        norm_cfg (dict): Config of norm layers. Defaults to None.\n        bbox_coder (dict): Config of bbox coder. 
Defaults 'BucketingBBoxCoder'.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox_cls (dict): Config of classification loss for bbox branch.\n        loss_bbox_reg (dict): Config of regression loss for bbox branch.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 cls_in_channels: int = 256,\n                 reg_in_channels: int = 256,\n                 roi_feat_size: int = 7,\n                 reg_feat_up_ratio: int = 2,\n                 reg_pre_kernel: int = 3,\n                 reg_post_kernel: int = 3,\n                 reg_pre_num: int = 2,\n                 reg_post_num: int = 1,\n                 cls_out_channels: int = 1024,\n                 reg_offset_out_channels: int = 256,\n                 reg_cls_out_channels: int = 256,\n                 num_cls_fcs: int = 1,\n                 num_reg_fcs: int = 0,\n                 reg_class_agnostic: bool = True,\n                 norm_cfg: OptConfigType = None,\n                 bbox_coder: ConfigType = dict(\n                     type='BucketingBBoxCoder',\n                     num_buckets=14,\n                     scale_factor=1.7),\n                 loss_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0),\n                 loss_bbox_cls: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_bbox_reg: ConfigType = dict(\n                     type='SmoothL1Loss', beta=0.1, loss_weight=1.0),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super(BBoxHead, self).__init__(init_cfg=init_cfg)\n        self.cls_in_channels = cls_in_channels\n        self.reg_in_channels = reg_in_channels\n        self.roi_feat_size = roi_feat_size\n        self.reg_feat_up_ratio = int(reg_feat_up_ratio)\n        self.num_buckets = bbox_coder['num_buckets']\n        assert self.reg_feat_up_ratio // 2 >= 1\n        self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio\n        assert self.up_reg_feat_size == bbox_coder['num_buckets']\n        self.reg_pre_kernel = reg_pre_kernel\n        self.reg_post_kernel = reg_post_kernel\n        self.reg_pre_num = reg_pre_num\n        self.reg_post_num = reg_post_num\n        self.num_classes = num_classes\n        self.cls_out_channels = cls_out_channels\n        self.reg_offset_out_channels = reg_offset_out_channels\n        self.reg_cls_out_channels = reg_cls_out_channels\n        self.num_cls_fcs = num_cls_fcs\n        self.num_reg_fcs = num_reg_fcs\n        self.reg_class_agnostic = reg_class_agnostic\n        assert self.reg_class_agnostic\n        self.norm_cfg = norm_cfg\n\n        self.bbox_coder = TASK_UTILS.build(bbox_coder)\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox_cls = MODELS.build(loss_bbox_cls)\n        self.loss_bbox_reg = MODELS.build(loss_bbox_reg)\n\n        self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,\n                                           self.cls_in_channels,\n                                           self.roi_feat_size,\n                                           self.cls_out_channels)\n\n        self.side_num = int(np.ceil(self.num_buckets / 2))\n\n        if self.reg_feat_up_ratio > 1:\n            self.upsample_x = nn.ConvTranspose1d(\n   
             reg_in_channels,\n                reg_in_channels,\n                self.reg_feat_up_ratio,\n                stride=self.reg_feat_up_ratio)\n            self.upsample_y = nn.ConvTranspose1d(\n                reg_in_channels,\n                reg_in_channels,\n                self.reg_feat_up_ratio,\n                stride=self.reg_feat_up_ratio)\n\n        self.reg_pre_convs = nn.ModuleList()\n        for i in range(self.reg_pre_num):\n            reg_pre_conv = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=reg_pre_kernel,\n                padding=reg_pre_kernel // 2,\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_pre_convs.append(reg_pre_conv)\n\n        self.reg_post_conv_xs = nn.ModuleList()\n        for i in range(self.reg_post_num):\n            reg_post_conv_x = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=(1, reg_post_kernel),\n                padding=(0, reg_post_kernel // 2),\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_post_conv_xs.append(reg_post_conv_x)\n        self.reg_post_conv_ys = nn.ModuleList()\n        for i in range(self.reg_post_num):\n            reg_post_conv_y = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=(reg_post_kernel, 1),\n                padding=(reg_post_kernel // 2, 0),\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_post_conv_ys.append(reg_post_conv_y)\n\n        self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)\n        self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)\n\n        self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,\n                                               self.reg_in_channels, 1,\n                                               self.reg_cls_out_channels)\n        self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,\n                                                  self.reg_in_channels, 1,\n                                                  self.reg_offset_out_channels)\n        self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)\n        self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)\n\n        if init_cfg is None:\n            self.init_cfg = [\n                dict(\n                    type='Xavier',\n                    layer='Linear',\n                    distribution='uniform',\n                    override=[\n                        dict(type='Normal', name='reg_conv_att_x', std=0.01),\n                        dict(type='Normal', name='reg_conv_att_y', std=0.01),\n                        dict(type='Normal', name='fc_reg_cls', std=0.01),\n                        dict(type='Normal', name='fc_cls', std=0.01),\n                        dict(type='Normal', name='fc_reg_offset', std=0.001)\n                    ])\n            ]\n            if self.reg_feat_up_ratio > 1:\n                self.init_cfg += [\n                    dict(\n                        type='Kaiming',\n                        distribution='normal',\n                        override=[\n                            dict(name='upsample_x'),\n                            dict(name='upsample_y')\n                        ])\n                
]\n\n    def _add_fc_branch(self, num_branch_fcs: int, in_channels: int,\n                       roi_feat_size: int,\n                       fc_out_channels: int) -> nn.ModuleList:\n        \"\"\"build fc layers.\"\"\"\n        in_channels = in_channels * roi_feat_size * roi_feat_size\n        branch_fcs = nn.ModuleList()\n        for i in range(num_branch_fcs):\n            fc_in_channels = (in_channels if i == 0 else fc_out_channels)\n            branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))\n        return branch_fcs\n\n    def cls_forward(self, cls_x: Tensor) -> Tensor:\n        \"\"\"forward of classification fc layers.\"\"\"\n        cls_x = cls_x.view(cls_x.size(0), -1)\n        for fc in self.cls_fcs:\n            cls_x = self.relu(fc(cls_x))\n        cls_score = self.fc_cls(cls_x)\n        return cls_score\n\n    def attention_pool(self, reg_x: Tensor) -> tuple:\n        \"\"\"Extract direction-specific features fx and fy with attention\n        methanism.\"\"\"\n        reg_fx = reg_x\n        reg_fy = reg_x\n        reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()\n        reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()\n        reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)\n        reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)\n        reg_fx = (reg_fx * reg_fx_att).sum(dim=2)\n        reg_fy = (reg_fy * reg_fy_att).sum(dim=3)\n        return reg_fx, reg_fy\n\n    def side_aware_feature_extractor(self, reg_x: Tensor) -> tuple:\n        \"\"\"Refine and extract side-aware features without split them.\"\"\"\n        for reg_pre_conv in self.reg_pre_convs:\n            reg_x = reg_pre_conv(reg_x)\n        reg_fx, reg_fy = self.attention_pool(reg_x)\n\n        if self.reg_post_num > 0:\n            reg_fx = reg_fx.unsqueeze(2)\n            reg_fy = reg_fy.unsqueeze(3)\n            for i in range(self.reg_post_num):\n                reg_fx = self.reg_post_conv_xs[i](reg_fx)\n                reg_fy = self.reg_post_conv_ys[i](reg_fy)\n            reg_fx = reg_fx.squeeze(2)\n            reg_fy = reg_fy.squeeze(3)\n        if self.reg_feat_up_ratio > 1:\n            reg_fx = self.relu(self.upsample_x(reg_fx))\n            reg_fy = self.relu(self.upsample_y(reg_fy))\n        reg_fx = torch.transpose(reg_fx, 1, 2)\n        reg_fy = torch.transpose(reg_fy, 1, 2)\n        return reg_fx.contiguous(), reg_fy.contiguous()\n\n    def reg_pred(self, x: Tensor, offset_fcs: nn.ModuleList,\n                 cls_fcs: nn.ModuleList) -> tuple:\n        \"\"\"Predict bucketing estimation (cls_pred) and fine regression (offset\n        pred) with side-aware features.\"\"\"\n        x_offset = x.view(-1, self.reg_in_channels)\n        x_cls = x.view(-1, self.reg_in_channels)\n\n        for fc in offset_fcs:\n            x_offset = self.relu(fc(x_offset))\n        for fc in cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n        offset_pred = self.fc_reg_offset(x_offset)\n        cls_pred = self.fc_reg_cls(x_cls)\n\n        offset_pred = offset_pred.view(x.size(0), -1)\n        cls_pred = cls_pred.view(x.size(0), -1)\n\n        return offset_pred, cls_pred\n\n    def side_aware_split(self, feat: Tensor) -> Tensor:\n        \"\"\"Split side-aware features aligned with orders of bucketing\n        targets.\"\"\"\n        l_end = int(np.ceil(self.up_reg_feat_size / 2))\n        r_start = int(np.floor(self.up_reg_feat_size / 2))\n        feat_fl = feat[:, :l_end]\n        feat_fr = feat[:, r_start:].flip(dims=(1, ))\n        feat_fl = 
feat_fl.contiguous()\n        feat_fr = feat_fr.contiguous()\n        feat = torch.cat([feat_fl, feat_fr], dim=-1)\n        return feat\n\n    def bbox_pred_split(self, bbox_pred: tuple,\n                        num_proposals_per_img: Sequence[int]) -> tuple:\n        \"\"\"Split batch bbox prediction back to each image.\"\"\"\n        bucket_cls_preds, bucket_offset_preds = bbox_pred\n        bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)\n        bucket_offset_preds = bucket_offset_preds.split(\n            num_proposals_per_img, 0)\n        bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))\n        return bbox_pred\n\n    def reg_forward(self, reg_x: Tensor) -> tuple:\n        \"\"\"forward of regression branch.\"\"\"\n        outs = self.side_aware_feature_extractor(reg_x)\n        edge_offset_preds = []\n        edge_cls_preds = []\n        reg_fx = outs[0]\n        reg_fy = outs[1]\n        offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,\n                                                  self.reg_cls_fcs)\n        offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,\n                                                  self.reg_cls_fcs)\n        offset_pred_x = self.side_aware_split(offset_pred_x)\n        offset_pred_y = self.side_aware_split(offset_pred_y)\n        cls_pred_x = self.side_aware_split(cls_pred_x)\n        cls_pred_y = self.side_aware_split(cls_pred_y)\n        edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)\n        edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)\n\n        return edge_cls_preds, edge_offset_preds\n\n    def forward(self, x: Tensor) -> tuple:\n        \"\"\"Forward features from the upstream network.\"\"\"\n        bbox_pred = self.reg_forward(x)\n        cls_score = self.cls_forward(x)\n\n        return cls_score, bbox_pred\n\n    def get_targets(self,\n                    sampling_results: List[SamplingResult],\n                    rcnn_train_cfg: ConfigDict,\n                    concat: bool = True) -> tuple:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\"\"\"\n        pos_proposals = [res.pos_bboxes for res in sampling_results]\n        neg_proposals = [res.neg_bboxes for res in sampling_results]\n        pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n        cls_reg_targets = self.bucket_target(\n            pos_proposals,\n            neg_proposals,\n            pos_gt_bboxes,\n            pos_gt_labels,\n            rcnn_train_cfg,\n            concat=concat)\n        (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n         bucket_offset_targets, bucket_offset_weights) = cls_reg_targets\n        return (labels, label_weights, (bucket_cls_targets,\n                                        bucket_offset_targets),\n                (bucket_cls_weights, bucket_offset_weights))\n\n    def bucket_target(self,\n                      pos_proposals_list: list,\n                      neg_proposals_list: list,\n                      pos_gt_bboxes_list: list,\n                      pos_gt_labels_list: list,\n                      rcnn_train_cfg: ConfigDict,\n                      concat: bool = True) -> tuple:\n        \"\"\"Compute bucketing estimation targets and fine regression targets for\n        a batch of images.\"\"\"\n        (labels, label_weights, bucket_cls_targets, 
bucket_cls_weights,\n         bucket_offset_targets, bucket_offset_weights) = multi_apply(\n             self._bucket_target_single,\n             pos_proposals_list,\n             neg_proposals_list,\n             pos_gt_bboxes_list,\n             pos_gt_labels_list,\n             cfg=rcnn_train_cfg)\n\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bucket_cls_targets = torch.cat(bucket_cls_targets, 0)\n            bucket_cls_weights = torch.cat(bucket_cls_weights, 0)\n            bucket_offset_targets = torch.cat(bucket_offset_targets, 0)\n            bucket_offset_weights = torch.cat(bucket_offset_weights, 0)\n        return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n                bucket_offset_targets, bucket_offset_weights)\n\n    def _bucket_target_single(self, pos_proposals: Tensor,\n                              neg_proposals: Tensor, pos_gt_bboxes: Tensor,\n                              pos_gt_labels: Tensor, cfg: ConfigDict) -> tuple:\n        \"\"\"Compute bucketing estimation targets and fine regression targets for\n        a single image.\n\n        Args:\n            pos_proposals (Tensor): positive proposals of a single image,\n                 Shape (n_pos, 4)\n            neg_proposals (Tensor): negative proposals of a single image,\n                 Shape (n_neg, 4).\n            pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals\n                 of a single image, Shape (n_pos, 4).\n            pos_gt_labels (Tensor): gt labels assigned to positive proposals\n                 of a single image, Shape (n_pos, ).\n            cfg (dict): Config of calculating targets\n\n        Returns:\n            tuple:\n\n            - labels (Tensor): Labels in a single image. Shape (n,).\n            - label_weights (Tensor): Label weights in a single image.\n                Shape (n,)\n            - bucket_cls_targets (Tensor): Bucket cls targets in\n                a single image. Shape (n, num_buckets*2).\n            - bucket_cls_weights (Tensor): Bucket cls weights in\n                a single image. Shape (n, num_buckets*2).\n            - bucket_offset_targets (Tensor): Bucket offset targets\n                in a single image. Shape (n, num_buckets*2).\n            - bucket_offset_targets (Tensor): Bucket offset weights\n                in a single image. 
Shape (n, num_buckets*2).\n        \"\"\"\n        num_pos = pos_proposals.size(0)\n        num_neg = neg_proposals.size(0)\n        num_samples = num_pos + num_neg\n        labels = pos_gt_bboxes.new_full((num_samples, ),\n                                        self.num_classes,\n                                        dtype=torch.long)\n        label_weights = pos_proposals.new_zeros(num_samples)\n        bucket_cls_targets = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_cls_weights = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_offset_targets = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        bucket_offset_weights = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        if num_pos > 0:\n            labels[:num_pos] = pos_gt_labels\n            label_weights[:num_pos] = 1.0\n            (pos_bucket_offset_targets, pos_bucket_offset_weights,\n             pos_bucket_cls_targets,\n             pos_bucket_cls_weights) = self.bbox_coder.encode(\n                 pos_proposals, pos_gt_bboxes)\n            bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets\n            bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights\n            bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets\n            bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights\n        if num_neg > 0:\n            label_weights[-num_neg:] = 1.0\n        return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n                bucket_offset_targets, bucket_offset_weights)\n\n    def loss(self,\n             cls_score: Tensor,\n             bbox_pred: Tuple[Tensor, Tensor],\n             rois: Tensor,\n             labels: Tensor,\n             label_weights: Tensor,\n             bbox_targets: Tuple[Tensor, Tensor],\n             bbox_weights: Tuple[Tensor, Tensor],\n             reduction_override: Optional[str] = None) -> dict:\n        \"\"\"Calculate the loss based on the network predictions and targets.\n\n        Args:\n            cls_score (Tensor): Classification prediction\n                results of all class, has shape\n                (batch_size * num_proposals_single_image, num_classes)\n            bbox_pred (Tensor): A tuple of regression prediction results\n                containing `bucket_cls_preds and` `bucket_offset_preds`.\n            rois (Tensor): RoIs with the shape\n                (batch_size * num_proposals_single_image, 5) where the first\n                column indicates batch id of each RoI.\n            labels (Tensor): Gt_labels for all proposals in a batch, has\n                shape (batch_size * num_proposals_single_image, ).\n            label_weights (Tensor): Labels_weights for all proposals in a\n                batch, has shape (batch_size * num_proposals_single_image, ).\n            bbox_targets (Tuple[Tensor, Tensor]): A tuple of regression target\n                containing `bucket_cls_targets` and `bucket_offset_targets`.\n                the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            bbox_weights (Tuple[Tensor, Tensor]): A tuple of regression\n                weights containing `bucket_cls_weights` and\n                `bucket_offset_weights`.\n            reduction_override (str, optional): The reduction\n                method used to override the original reduction\n                method 
of the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to None,\n\n        Returns:\n            dict: A dictionary of loss.\n        \"\"\"\n        losses = dict()\n        if cls_score is not None:\n            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n            losses['loss_cls'] = self.loss_cls(\n                cls_score,\n                labels,\n                label_weights,\n                avg_factor=avg_factor,\n                reduction_override=reduction_override)\n            losses['acc'] = accuracy(cls_score, labels)\n\n        if bbox_pred is not None:\n            bucket_cls_preds, bucket_offset_preds = bbox_pred\n            bucket_cls_targets, bucket_offset_targets = bbox_targets\n            bucket_cls_weights, bucket_offset_weights = bbox_weights\n            # edge cls\n            bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)\n            bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)\n            bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)\n            losses['loss_bbox_cls'] = self.loss_bbox_cls(\n                bucket_cls_preds,\n                bucket_cls_targets,\n                bucket_cls_weights,\n                avg_factor=bucket_cls_targets.size(0),\n                reduction_override=reduction_override)\n\n            losses['loss_bbox_reg'] = self.loss_bbox_reg(\n                bucket_offset_preds,\n                bucket_offset_targets,\n                bucket_offset_weights,\n                avg_factor=bucket_offset_targets.size(0),\n                reduction_override=reduction_override)\n\n        return losses\n\n    def _predict_by_feat_single(\n            self,\n            roi: Tensor,\n            cls_score: Tensor,\n            bbox_pred: Tuple[Tensor, Tensor],\n            img_meta: dict,\n            rescale: bool = False,\n            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            roi (Tensor): Boxes to be transformed. 
Has shape (num_boxes, 5).\n                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n            cls_score (Tensor): Box scores, has shape\n                (num_boxes, num_classes + 1).\n            bbox_pred (Tuple[Tensor, Tensor]): Box cls preds and offset preds.\n            img_meta (dict): image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n                Defaults to None\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        results = InstanceData()\n        if isinstance(cls_score, list):\n            cls_score = sum(cls_score) / float(len(cls_score))\n        scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n        img_shape = img_meta['img_shape']\n        if bbox_pred is not None:\n            bboxes, confidences = self.bbox_coder.decode(\n                roi[:, 1:], bbox_pred, img_shape)\n        else:\n            bboxes = roi[:, 1:].clone()\n            confidences = None\n            if img_shape is not None:\n                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)\n                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)\n\n        if rescale and bboxes.size(0) > 0:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n            bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(\n                bboxes.size()[0], -1)\n\n        if rcnn_test_cfg is None:\n            results.bboxes = bboxes\n            results.scores = scores\n        else:\n            det_bboxes, det_labels = multiclass_nms(\n                bboxes,\n                scores,\n                rcnn_test_cfg.score_thr,\n                rcnn_test_cfg.nms,\n                rcnn_test_cfg.max_per_img,\n                score_factors=confidences)\n            results.bboxes = det_bboxes[:, :4]\n            results.scores = det_bboxes[:, -1]\n            results.labels = det_labels\n        return results\n\n    def refine_bboxes(self, sampling_results: List[SamplingResult],\n                      bbox_results: dict,\n                      batch_img_metas: List[dict]) -> InstanceList:\n        \"\"\"Refine bboxes during training.\n\n        Args:\n            sampling_results (List[:obj:`SamplingResult`]): Sampling results.\n            bbox_results (dict): Usually is a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n                  column indicates batch id of each RoI.\n                - `bbox_targets` (tuple):  Ground truth for proposals in a\n                  single image. 
Containing the following list of Tensors:\n                  (labels, label_weights, bbox_targets, bbox_weights)\n            batch_img_metas (List[dict]): List of image information.\n\n        Returns:\n            list[:obj:`InstanceData`]: Refined bboxes of each image.\n        \"\"\"\n        pos_is_gts = [res.pos_is_gt for res in sampling_results]\n        # bbox_targets is a tuple\n        labels = bbox_results['bbox_targets'][0]\n        cls_scores = bbox_results['cls_score']\n        rois = bbox_results['rois']\n        bbox_preds = bbox_results['bbox_pred']\n\n        if cls_scores.numel() == 0:\n            return None\n\n        labels = torch.where(labels == self.num_classes,\n                             cls_scores[:, :-1].argmax(1), labels)\n\n        img_ids = rois[:, 0].long().unique(sorted=True)\n        assert img_ids.numel() <= len(batch_img_metas)\n\n        results_list = []\n        for i in range(len(batch_img_metas)):\n            inds = torch.nonzero(\n                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n            num_rois = inds.numel()\n\n            bboxes_ = rois[inds, 1:]\n            label_ = labels[inds]\n            edge_cls_preds, edge_offset_preds = bbox_preds\n            edge_cls_preds_ = edge_cls_preds[inds]\n            edge_offset_preds_ = edge_offset_preds[inds]\n            bbox_pred_ = (edge_cls_preds_, edge_offset_preds_)\n            img_meta_ = batch_img_metas[i]\n            pos_is_gts_ = pos_is_gts[i]\n\n            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n                                           img_meta_)\n            # filter gt bboxes\n            pos_keep = 1 - pos_is_gts_\n            keep_inds = pos_is_gts_.new_ones(num_rois)\n            keep_inds[:len(pos_is_gts_)] = pos_keep\n            results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])\n            results_list.append(results)\n\n        return results_list\n\n    def regress_by_class(self, rois: Tensor, label: Tensor, bbox_pred: tuple,\n                         img_meta: dict) -> Tensor:\n        \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n        Args:\n            rois (Tensor): shape (n, 4) or (n, 5)\n            label (Tensor): shape (n, )\n            bbox_pred (Tuple[Tensor]): shape [(n, num_buckets *2), \\\n                (n, num_buckets *2)]\n            img_meta (dict): Image meta info.\n\n        Returns:\n            Tensor: Regressed bboxes, the same shape as input rois.\n        \"\"\"\n        assert rois.size(1) == 4 or rois.size(1) == 5\n\n        if rois.size(1) == 4:\n            new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,\n                                                 img_meta['img_shape'])\n        else:\n            bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,\n                                               img_meta['img_shape'])\n            new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n        return new_rois\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple, Union\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .convfc_bbox_head import ConvFCBBoxHead\n\n\n@MODELS.register_module()\nclass SCNetBBoxHead(ConvFCBBoxHead):\n    \"\"\"BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us\n    to get intermediate shared feature.\n    \"\"\"\n\n    def _forward_shared(self, x: Tensor) -> Tensor:\n        \"\"\"Forward function for shared part.\n\n        Args:\n            x (Tensor): Input feature.\n\n        Returns:\n            Tensor: Shared feature.\n        \"\"\"\n        if self.num_shared_convs > 0:\n            for conv in self.shared_convs:\n                x = conv(x)\n\n        if self.num_shared_fcs > 0:\n            if self.with_avg_pool:\n                x = self.avg_pool(x)\n\n            x = x.flatten(1)\n\n            for fc in self.shared_fcs:\n                x = self.relu(fc(x))\n\n        return x\n\n    def _forward_cls_reg(self, x: Tensor) -> Tuple[Tensor]:\n        \"\"\"Forward function for classification and regression parts.\n\n        Args:\n            x (Tensor): Input feature.\n\n        Returns:\n            tuple[Tensor]:\n\n                - cls_score (Tensor): classification prediction.\n                - bbox_pred (Tensor): bbox prediction.\n        \"\"\"\n        x_cls = x\n        x_reg = x\n\n        for conv in self.cls_convs:\n            x_cls = conv(x_cls)\n        if x_cls.dim() > 2:\n            if self.with_avg_pool:\n                x_cls = self.avg_pool(x_cls)\n            x_cls = x_cls.flatten(1)\n        for fc in self.cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n\n        for conv in self.reg_convs:\n            x_reg = conv(x_reg)\n        if x_reg.dim() > 2:\n            if self.with_avg_pool:\n                x_reg = self.avg_pool(x_reg)\n            x_reg = x_reg.flatten(1)\n        for fc in self.reg_fcs:\n            x_reg = self.relu(fc(x_reg))\n\n        cls_score = self.fc_cls(x_cls) if self.with_cls else None\n        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n\n        return cls_score, bbox_pred\n\n    def forward(\n            self,\n            x: Tensor,\n            return_shared_feat: bool = False) -> Union[Tensor, Tuple[Tensor]]:\n        \"\"\"Forward function.\n\n        Args:\n            x (Tensor): input features\n            return_shared_feat (bool): If True, return cls-reg-shared feature.\n\n        Return:\n            out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,\n                if  ``return_shared_feat`` is True, append ``x_shared`` to the\n                returned tuple.\n        \"\"\"\n        x_shared = self._forward_shared(x)\n        out = self._forward_cls_reg(x_shared)\n\n        if return_shared_feat:\n            out += (x_shared, )\n\n        return out\n"
  },
  {
    "path": "mmdet/models/roi_heads/cascade_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.model import ModuleList\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.test_time_augs import merge_aug_masks\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi, get_box_tensor\nfrom mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,\n                         OptMultiConfig)\nfrom ..utils.misc import empty_instances, unpack_gt_instances\nfrom .base_roi_head import BaseRoIHead\n\n\n@MODELS.register_module()\nclass CascadeRoIHead(BaseRoIHead):\n    \"\"\"Cascade roi head including one bbox head and one mask head.\n\n    https://arxiv.org/abs/1712.00726\n    \"\"\"\n\n    def __init__(self,\n                 num_stages: int,\n                 stage_loss_weights: Union[List[float], Tuple[float]],\n                 bbox_roi_extractor: OptMultiConfig = None,\n                 bbox_head: OptMultiConfig = None,\n                 mask_roi_extractor: OptMultiConfig = None,\n                 mask_head: OptMultiConfig = None,\n                 shared_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        assert bbox_roi_extractor is not None\n        assert bbox_head is not None\n        assert shared_head is None, \\\n            'Shared head is not supported in Cascade RCNN anymore'\n\n        self.num_stages = num_stages\n        self.stage_loss_weights = stage_loss_weights\n        super().__init__(\n            bbox_roi_extractor=bbox_roi_extractor,\n            bbox_head=bbox_head,\n            mask_roi_extractor=mask_roi_extractor,\n            mask_head=mask_head,\n            shared_head=shared_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg)\n\n    def init_bbox_head(self, bbox_roi_extractor: MultiConfig,\n                       bbox_head: MultiConfig) -> None:\n        \"\"\"Initialize box head and box roi extractor.\n\n        Args:\n            bbox_roi_extractor (:obj:`ConfigDict`, dict or list):\n                Config of box roi extractor.\n            bbox_head (:obj:`ConfigDict`, dict or list): Config\n                of box in box head.\n        \"\"\"\n        self.bbox_roi_extractor = ModuleList()\n        self.bbox_head = ModuleList()\n        if not isinstance(bbox_roi_extractor, list):\n            bbox_roi_extractor = [\n                bbox_roi_extractor for _ in range(self.num_stages)\n            ]\n        if not isinstance(bbox_head, list):\n            bbox_head = [bbox_head for _ in range(self.num_stages)]\n        assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n        for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n            self.bbox_roi_extractor.append(MODELS.build(roi_extractor))\n            self.bbox_head.append(MODELS.build(head))\n\n    def init_mask_head(self, mask_roi_extractor: MultiConfig,\n                       mask_head: MultiConfig) -> None:\n        \"\"\"Initialize mask head and mask roi extractor.\n\n        Args:\n            mask_head (dict): Config of mask in mask head.\n            mask_roi_extractor (:obj:`ConfigDict`, dict or list):\n      
          Config of mask roi extractor.\n        \"\"\"\n        self.mask_head = nn.ModuleList()\n        if not isinstance(mask_head, list):\n            mask_head = [mask_head for _ in range(self.num_stages)]\n        assert len(mask_head) == self.num_stages\n        for head in mask_head:\n            self.mask_head.append(MODELS.build(head))\n        if mask_roi_extractor is not None:\n            self.share_roi_extractor = False\n            self.mask_roi_extractor = ModuleList()\n            if not isinstance(mask_roi_extractor, list):\n                mask_roi_extractor = [\n                    mask_roi_extractor for _ in range(self.num_stages)\n                ]\n            assert len(mask_roi_extractor) == self.num_stages\n            for roi_extractor in mask_roi_extractor:\n                self.mask_roi_extractor.append(MODELS.build(roi_extractor))\n        else:\n            self.share_roi_extractor = True\n            self.mask_roi_extractor = self.bbox_roi_extractor\n\n    def init_assigner_sampler(self) -> None:\n        \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n        self.bbox_assigner = []\n        self.bbox_sampler = []\n        if self.train_cfg is not None:\n            for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n                self.bbox_assigner.append(\n                    TASK_UTILS.build(rcnn_train_cfg.assigner))\n                self.current_stage = idx\n                self.bbox_sampler.append(\n                    TASK_UTILS.build(\n                        rcnn_train_cfg.sampler,\n                        default_args=dict(context=self)))\n\n    def _bbox_forward(self, stage: int, x: Tuple[Tensor],\n                      rois: Tensor) -> dict:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        cls_score, bbox_pred = bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def bbox_loss(self, stage: int, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult]) -> dict:\n        \"\"\"Run forward function and calculate loss for box head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract 
bbox RoI features.\n                - `loss_bbox` (dict): A dictionary of bbox loss components.\n                - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n                  column indicates batch id of each RoI.\n                - `bbox_targets` (tuple):  Ground truth for proposals in a\n                  single image. Containing the following list of Tensors:\n                  (labels, label_weights, bbox_targets, bbox_weights)\n        \"\"\"\n        bbox_head = self.bbox_head[stage]\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(stage, x, rois)\n        bbox_results.update(rois=rois)\n\n        bbox_loss_and_target = bbox_head.loss_and_target(\n            cls_score=bbox_results['cls_score'],\n            bbox_pred=bbox_results['bbox_pred'],\n            rois=rois,\n            sampling_results=sampling_results,\n            rcnn_train_cfg=self.train_cfg[stage])\n        bbox_results.update(bbox_loss_and_target)\n\n        return bbox_results\n\n    def _mask_forward(self, stage: int, x: Tuple[Tensor],\n                      rois: Tensor) -> dict:\n        \"\"\"Mask head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n        \"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        mask_preds = mask_head(mask_feats)\n\n        mask_results = dict(mask_preds=mask_preds)\n        return mask_results\n\n    def mask_loss(self, stage: int, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n        mask_results = self._mask_forward(stage, x, pos_rois)\n\n        mask_head = self.mask_head[stage]\n\n        mask_loss_and_target = mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg[stage])\n        mask_results.update(mask_loss_and_target)\n\n        return mask_results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        # TODO: May add a new function in baseroihead\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        num_imgs = len(batch_data_samples)\n        losses = dict()\n        results_list = rpn_results_list\n        for stage in range(self.num_stages):\n            self.current_stage = stage\n\n            stage_loss_weight = self.stage_loss_weights[stage]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            if self.with_bbox or self.with_mask:\n                bbox_assigner = self.bbox_assigner[stage]\n                bbox_sampler = self.bbox_sampler[stage]\n\n                for i in range(num_imgs):\n                    results = results_list[i]\n                    # rename rpn_results.bboxes to rpn_results.priors\n                    results.priors = results.pop('bboxes')\n\n                    assign_result = bbox_assigner.assign(\n                        results, batch_gt_instances[i],\n                        batch_gt_instances_ignore[i])\n\n                    sampling_result = bbox_sampler.sample(\n                        assign_result,\n                        results,\n                        batch_gt_instances[i],\n                        feats=[lvl_feat[i][None] for lvl_feat in x])\n                    sampling_results.append(sampling_result)\n\n            # bbox head forward and loss\n            bbox_results = self.bbox_loss(stage, x, sampling_results)\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{stage}.{name}'] = (\n                    value * stage_loss_weight if 'loss' in name else value)\n\n            # mask head forward and loss\n            if self.with_mask:\n                mask_results = self.mask_loss(stage, x, 
sampling_results,\n                                              batch_gt_instances)\n                for name, value in mask_results['loss_mask'].items():\n                    losses[f's{stage}.{name}'] = (\n                        value * stage_loss_weight if 'loss' in name else value)\n\n            # refine bboxes\n            if stage < self.num_stages - 1:\n                bbox_head = self.bbox_head[stage]\n                with torch.no_grad():\n                    results_list = bbox_head.refine_bboxes(\n                        sampling_results, bbox_results, batch_img_metas)\n                    # Empty proposal\n                    if results_list is None:\n                        break\n        return losses\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False,\n                     **kwargs) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        proposals = [res.bboxes for res in rpn_results_list]\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = bbox2roi(proposals)\n\n        if rois.shape[0] == 0:\n            return empty_instances(\n                batch_img_metas,\n                rois.device,\n                task_type='bbox',\n                box_type=self.bbox_head[-1].predict_box_type,\n                num_classes=self.bbox_head[-1].num_classes,\n                score_per_cls=rcnn_test_cfg is None)\n\n        rois, cls_scores, bbox_preds = self._refine_roi(\n            x=x,\n            rois=rois,\n            batch_img_metas=batch_img_metas,\n            num_proposals_per_img=num_proposals_per_img,\n            **kwargs)\n\n        results_list = self.bbox_head[-1].predict_by_feat(\n            rois=rois,\n            cls_scores=cls_scores,\n            bbox_preds=bbox_preds,\n            batch_img_metas=batch_img_metas,\n            rescale=rescale,\n            rcnn_test_cfg=rcnn_test_cfg)\n        return results_list\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     results_list: List[InstanceData],\n                     rescale: bool = False) -> List[InstanceData]:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        
results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        aug_masks = []\n        for stage in range(self.num_stages):\n            mask_results = self._mask_forward(stage, x, mask_rois)\n            mask_preds = mask_results['mask_preds']\n            # split batch mask prediction back to each image\n            mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n            aug_masks.append([m.sigmoid().detach() for m in mask_preds])\n\n        merged_masks = []\n        for i in range(len(batch_img_metas)):\n            aug_mask = [mask[i] for mask in aug_masks]\n            merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n            merged_masks.append(merged_mask)\n        results_list = self.mask_head[-1].predict_by_feat(\n            mask_preds=merged_masks,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale,\n            activate_map=True)\n        return results_list\n\n    def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,\n                    batch_img_metas: List[dict],\n                    num_proposals_per_img: Sequence[int], **kwargs) -> tuple:\n        \"\"\"Multi-stage refinement of RoI.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]\n            batch_img_metas (list[dict]): List of image information.\n            num_proposals_per_img (sequence[int]): number of proposals\n                in each image.\n\n        Returns:\n            tuple:\n\n               - rois (Tensor): Refined RoI.\n               - cls_scores (list[Tensor]): Average predicted\n                   cls score per image.\n               - bbox_preds (list[Tensor]): Bbox branch predictions\n                   for the last stage of per image.\n        \"\"\"\n        # \"ms\" in variable names means multi-stage\n        ms_scores = []\n        for stage in range(self.num_stages):\n            bbox_results = 
self._bbox_forward(\n                stage=stage, x=x, rois=rois, **kwargs)\n\n            # split batch bbox prediction back to each image\n            cls_scores = bbox_results['cls_score']\n            bbox_preds = bbox_results['bbox_pred']\n\n            rois = rois.split(num_proposals_per_img, 0)\n            cls_scores = cls_scores.split(num_proposals_per_img, 0)\n            ms_scores.append(cls_scores)\n\n            # some detector with_reg is False, bbox_preds will be None\n            if bbox_preds is not None:\n                # TODO move this to a sabl_roi_head\n                # the bbox prediction of some detectors like SABL is not Tensor\n                if isinstance(bbox_preds, torch.Tensor):\n                    bbox_preds = bbox_preds.split(num_proposals_per_img, 0)\n                else:\n                    bbox_preds = self.bbox_head[stage].bbox_pred_split(\n                        bbox_preds, num_proposals_per_img)\n            else:\n                bbox_preds = (None, ) * len(batch_img_metas)\n\n            if stage < self.num_stages - 1:\n                bbox_head = self.bbox_head[stage]\n                if bbox_head.custom_activation:\n                    cls_scores = [\n                        bbox_head.loss_cls.get_activation(s)\n                        for s in cls_scores\n                    ]\n                refine_rois_list = []\n                for i in range(len(batch_img_metas)):\n                    if rois[i].shape[0] > 0:\n                        bbox_label = cls_scores[i][:, :-1].argmax(dim=1)\n                        # Refactor `bbox_head.regress_by_class` to only accept\n                        # box tensor without img_idx concatenated.\n                        refined_bboxes = bbox_head.regress_by_class(\n                            rois[i][:, 1:], bbox_label, bbox_preds[i],\n                            batch_img_metas[i])\n                        refined_bboxes = get_box_tensor(refined_bboxes)\n                        refined_rois = torch.cat(\n                            [rois[i][:, [0]], refined_bboxes], dim=1)\n                        refine_rois_list.append(refined_rois)\n                rois = torch.cat(refine_rois_list)\n\n        # average scores of each image by stages\n        cls_scores = [\n            sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n            for i in range(len(batch_img_metas))\n        ]\n        return rois, cls_scores, bbox_preds\n\n    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n                batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            rois, cls_scores, bbox_preds = self._refine_roi(\n                x, rois, batch_img_metas, num_proposals_per_img)\n            results = results + (cls_scores, bbox_preds)\n        # mask head\n        if self.with_mask:\n            aug_masks = []\n            rois = torch.cat(rois)\n            for stage in range(self.num_stages):\n                mask_results = self._mask_forward(stage, x, rois)\n                mask_preds = mask_results['mask_preds']\n                mask_preds = mask_preds.split(num_proposals_per_img, 0)\n                aug_masks.append([m.sigmoid().detach() for m in mask_preds])\n\n            merged_masks = []\n            for i in range(len(batch_img_metas)):\n                aug_mask = [mask[i] for mask in aug_masks]\n                merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n                merged_masks.append(merged_mask)\n            results = results + (merged_masks, )\n        return results\n"
  },
  {
    "path": "mmdet/models/roi_heads/double_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass DoubleHeadRoIHead(StandardRoIHead):\n    \"\"\"RoI head for `Double Head RCNN <https://arxiv.org/abs/1904.06493>`_.\n\n    Args:\n        reg_roi_scale_factor (float): The scale factor to extend the rois\n            used to extract the regression features.\n    \"\"\"\n\n    def __init__(self, reg_roi_scale_factor: float, **kwargs):\n        super().__init__(**kwargs)\n        self.reg_roi_scale_factor = reg_roi_scale_factor\n\n    def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        bbox_cls_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        bbox_reg_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs],\n            rois,\n            roi_scale_factor=self.reg_roi_scale_factor)\n        if self.with_shared_head:\n            bbox_cls_feats = self.shared_head(bbox_cls_feats)\n            bbox_reg_feats = self.shared_head(bbox_reg_feats)\n        cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score,\n            bbox_pred=bbox_pred,\n            bbox_feats=bbox_cls_feats)\n        return bbox_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/dynamic_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.models.losses import SmoothL1Loss\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import InstanceList\nfrom ..utils.misc import unpack_gt_instances\nfrom .standard_roi_head import StandardRoIHead\n\nEPS = 1e-15\n\n\n@MODELS.register_module()\nclass DynamicRoIHead(StandardRoIHead):\n    \"\"\"RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.\"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n        assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)\n        # the IoU history of the past `update_iter_interval` iterations\n        self.iou_history = []\n        # the beta history of the past `update_iter_interval` iterations\n        self.beta_history = []\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Forward function for training.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, _ = outputs\n\n        # assign gts and sample proposals\n        num_imgs = len(batch_data_samples)\n        sampling_results = []\n        cur_iou = []\n        for i in range(num_imgs):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n\n            assign_result = self.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                feats=[lvl_feat[i][None] for lvl_feat in x])\n            # record the `iou_topk`-th largest IoU in an image\n            iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,\n                           len(assign_result.max_overlaps))\n            ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)\n            cur_iou.append(ious[-1].item())\n            sampling_results.append(sampling_result)\n        # average the current IoUs over images\n        cur_iou = np.mean(cur_iou)\n        self.iou_history.append(cur_iou)\n\n        losses = dict()\n        # bbox head forward and loss\n        if self.with_bbox:\n            bbox_results = self.bbox_loss(x, sampling_results)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self.mask_loss(x, sampling_results,\n                                          bbox_results['bbox_feats'],\n                
                          batch_gt_instances)\n            losses.update(mask_results['loss_mask'])\n\n        # update IoU threshold and SmoothL1 beta\n        update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval\n        if len(self.iou_history) % update_iter_interval == 0:\n            new_iou_thr, new_beta = self.update_hyperparameters()\n\n        return losses\n\n    def bbox_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n                - `loss_bbox` (dict): A dictionary of bbox loss components.\n        \"\"\"\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n\n        bbox_loss_and_target = self.bbox_head.loss_and_target(\n            cls_score=bbox_results['cls_score'],\n            bbox_pred=bbox_results['bbox_pred'],\n            rois=rois,\n            sampling_results=sampling_results,\n            rcnn_train_cfg=self.train_cfg)\n        bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])\n\n        # record the `beta_topk`-th smallest target\n        # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets\n        # and bbox_weights, respectively\n        bbox_targets = bbox_loss_and_target['bbox_targets']\n        pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)\n        num_pos = len(pos_inds)\n        num_imgs = len(sampling_results)\n        if num_pos > 0:\n            cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)\n            beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,\n                            num_pos)\n            cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()\n            self.beta_history.append(cur_target)\n\n        return bbox_results\n\n    def update_hyperparameters(self):\n        \"\"\"Update hyperparameters like IoU thresholds for assigner and beta for\n        SmoothL1 loss based on the training statistics.\n\n        Returns:\n            tuple[float]: the updated ``iou_thr`` and ``beta``.\n        \"\"\"\n        new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,\n                          np.mean(self.iou_history))\n        self.iou_history = []\n        self.bbox_assigner.pos_iou_thr = new_iou_thr\n        self.bbox_assigner.neg_iou_thr = new_iou_thr\n        self.bbox_assigner.min_pos_iou = new_iou_thr\n        if (not self.beta_history) or (np.median(self.beta_history) < EPS):\n            # avoid 0 or too small value for new_beta\n            new_beta = self.bbox_head.loss_bbox.beta\n        else:\n            new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,\n                           np.median(self.beta_history))\n        self.beta_history = []\n        self.bbox_head.loss_bbox.beta = new_beta\n        return new_iou_thr, new_beta\n"
  },
  {
    "path": "mmdet/models/roi_heads/grid_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils.misc import unpack_gt_instances\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass GridRoIHead(StandardRoIHead):\n    \"\"\"Implementation of `Grid RoI Head <https://arxiv.org/abs/1811.12030>`_\n\n    Args:\n        grid_roi_extractor (:obj:`ConfigDict` or dict): Config of\n            roi extractor.\n        grid_head (:obj:`ConfigDict` or dict): Config of grid head\n    \"\"\"\n\n    def __init__(self, grid_roi_extractor: ConfigType, grid_head: ConfigType,\n                 **kwargs) -> None:\n        assert grid_head is not None\n        super().__init__(**kwargs)\n        if grid_roi_extractor is not None:\n            self.grid_roi_extractor = MODELS.build(grid_roi_extractor)\n            self.share_roi_extractor = False\n        else:\n            self.share_roi_extractor = True\n            self.grid_roi_extractor = self.bbox_roi_extractor\n        self.grid_head = MODELS.build(grid_head)\n\n    def _random_jitter(self,\n                       sampling_results: List[SamplingResult],\n                       batch_img_metas: List[dict],\n                       amplitude: float = 0.15) -> List[SamplingResult]:\n        \"\"\"Ramdom jitter positive proposals for training.\n\n        Args:\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            batch_img_metas (list[dict]): List of image information.\n            amplitude (float): Amplitude of random offset. Defaults to 0.15.\n\n        Returns:\n            list[obj:SamplingResult]: SamplingResults after random jittering.\n        \"\"\"\n        for sampling_result, img_meta in zip(sampling_results,\n                                             batch_img_metas):\n            bboxes = sampling_result.pos_priors\n            random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(\n                -amplitude, amplitude)\n            # before jittering\n            cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2\n            wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()\n            # after jittering\n            new_cxcy = cxcy + wh * random_offsets[:, :2]\n            new_wh = wh * (1 + random_offsets[:, 2:])\n            # xywh to xyxy\n            new_x1y1 = (new_cxcy - new_wh / 2)\n            new_x2y2 = (new_cxcy + new_wh / 2)\n            new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)\n            # clip bboxes\n            max_shape = img_meta['img_shape']\n            if max_shape is not None:\n                new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)\n                new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)\n\n            sampling_result.pos_priors = new_bboxes\n        return sampling_results\n\n    # TODO: Forward is incorrect and need to refactor.\n    def forward(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList = None) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (Tuple[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n            the meta information of each image and corresponding\n            annotations.\n\n        Returns\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            bbox_results = self._bbox_forward(x, rois)\n            results = results + (bbox_results['cls_score'], )\n            if self.bbox_head.with_reg:\n                results = results + (bbox_results['bbox_pred'], )\n\n            # grid head\n            grid_rois = rois[:100]\n            grid_feats = self.grid_roi_extractor(\n                x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)\n            if self.with_shared_head:\n                grid_feats = self.shared_head(grid_feats)\n            self.grid_head.test_mode = True\n            grid_preds = self.grid_head(grid_feats)\n            results = results + (grid_preds, )\n\n        # mask head\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_results = self._mask_forward(x, mask_rois)\n            results = results + (mask_results['mask_preds'], )\n        return results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList, **kwargs) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n\n        # assign gts and sample proposals\n        num_imgs = len(batch_data_samples)\n        sampling_results = []\n        for i in range(num_imgs):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n\n            assign_result = self.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                feats=[lvl_feat[i][None] for lvl_feat in x])\n            sampling_results.append(sampling_result)\n\n        losses = dict()\n        # bbox head loss\n        if self.with_bbox:\n            bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self.mask_loss(x, sampling_results,\n                                          bbox_results['bbox_feats'],\n                                          batch_gt_instances)\n            losses.update(mask_results['loss_mask'])\n\n        return losses\n\n    def bbox_loss(self,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  batch_img_metas: Optional[List[dict]] = None) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[:obj:`SamplingResult`]): Sampling results.\n            batch_img_metas (list[dict], optional): Meta information of each\n                image, e.g., image size, scaling factor, etc.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n            - `cls_score` (Tensor): Classification scores.\n            - `bbox_pred` (Tensor): Box energies / deltas.\n            - `bbox_feats` (Tensor): Extract bbox RoI features.\n            - `loss_bbox` (dict): A dictionary of bbox loss components.\n        \"\"\"\n        assert batch_img_metas is not None\n        bbox_results = super().bbox_loss(x, sampling_results)\n\n        # Grid head forward and loss\n        sampling_results = self._random_jitter(sampling_results,\n                                               batch_img_metas)\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n\n        # GN in head does not support zero shape input\n        if pos_rois.shape[0] == 0:\n            return bbox_results\n\n        grid_feats = self.grid_roi_extractor(\n            x[:self.grid_roi_extractor.num_inputs], pos_rois)\n        if self.with_shared_head:\n            grid_feats = self.shared_head(grid_feats)\n        # Accelerate training\n        max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)\n        sample_idx = 
torch.randperm(\n            grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid\n                                      )]\n        grid_feats = grid_feats[sample_idx]\n        grid_pred = self.grid_head(grid_feats)\n\n        loss_grid = self.grid_head.loss(grid_pred, sample_idx,\n                                        sampling_results, self.train_cfg)\n\n        bbox_results['loss_bbox'].update(loss_grid)\n        return bbox_results\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (:obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape \\\n            (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4), the last \\\n            dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        results_list = super().predict_bbox(\n            x,\n            batch_img_metas=batch_img_metas,\n            rpn_results_list=rpn_results_list,\n            rcnn_test_cfg=rcnn_test_cfg,\n            rescale=False)\n\n        grid_rois = bbox2roi([res.bboxes for res in results_list])\n        if grid_rois.shape[0] != 0:\n            grid_feats = self.grid_roi_extractor(\n                x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)\n            if self.with_shared_head:\n                grid_feats = self.shared_head(grid_feats)\n            self.grid_head.test_mode = True\n            grid_preds = self.grid_head(grid_feats)\n            results_list = self.grid_head.predict_by_feat(\n                grid_preds=grid_preds,\n                results_list=results_list,\n                batch_img_metas=batch_img_metas,\n                rescale=rescale)\n\n        return results_list\n"
  },
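  {
    "path": "examples/grid_roi_subsample_sketch.py",
    "content": "# Hypothetical sketch, not an mmdetection file: the path and helper name are\n# illustrative only. It demonstrates the positive-RoI subsampling trick used in\n# GridRoIHead.bbox_loss above, where at most `max_num_grid` (default 192)\n# positive RoI features are randomly kept before the grid head to bound the\n# extra training cost. Shapes below are dummies.\nimport torch\n\n\ndef subsample_pos_feats(pos_feats: torch.Tensor, max_num: int = 192):\n    # Randomly keep at most `max_num` RoI features and remember which ones,\n    # mirroring how `sample_idx` is later passed to `grid_head.loss`.\n    num_pos = pos_feats.shape[0]\n    sample_idx = torch.randperm(num_pos)[:min(num_pos, max_num)]\n    return pos_feats[sample_idx], sample_idx\n\n\nif __name__ == '__main__':\n    feats = torch.randn(300, 256, 14, 14)  # e.g. 300 positive RoI features\n    kept, idx = subsample_pos_feats(feats, max_num=192)\n    assert kept.shape[0] == 192 and idx.numel() == 192\n"
  },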
  {
    "path": "mmdet/models/roi_heads/htc_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom mmdet.models.test_time_augs import merge_aug_masks\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import InstanceList, OptConfigType\nfrom ..layers import adaptive_avg_pool2d\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils import empty_instances, unpack_gt_instances\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@MODELS.register_module()\nclass HybridTaskCascadeRoIHead(CascadeRoIHead):\n    \"\"\"Hybrid task cascade roi head including one bbox head and one mask head.\n\n    https://arxiv.org/abs/1901.07518\n\n    Args:\n        num_stages (int): Number of cascade stages.\n        stage_loss_weights (list[float]): Loss weight for every stage.\n        semantic_roi_extractor (:obj:`ConfigDict` or dict, optional):\n            Config of semantic roi extractor. Defaults to None.\n        Semantic_head (:obj:`ConfigDict` or dict, optional):\n            Config of semantic head. Defaults to None.\n        interleaved (bool): Whether to interleaves the box branch and mask\n            branch. If True, the mask branch can take the refined bounding\n            box predictions. Defaults to True.\n        mask_info_flow (bool): Whether to turn on the mask information flow,\n            which means that feeding the mask features of the preceding stage\n            to the current stage. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num_stages: int,\n                 stage_loss_weights: List[float],\n                 semantic_roi_extractor: OptConfigType = None,\n                 semantic_head: OptConfigType = None,\n                 semantic_fusion: Tuple[str] = ('bbox', 'mask'),\n                 interleaved: bool = True,\n                 mask_info_flow: bool = True,\n                 **kwargs) -> None:\n        super().__init__(\n            num_stages=num_stages,\n            stage_loss_weights=stage_loss_weights,\n            **kwargs)\n        assert self.with_bbox\n        assert not self.with_shared_head  # shared head is not supported\n\n        if semantic_head is not None:\n            self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor)\n            self.semantic_head = MODELS.build(semantic_head)\n\n        self.semantic_fusion = semantic_fusion\n        self.interleaved = interleaved\n        self.mask_info_flow = mask_info_flow\n\n    # TODO move to base_roi_head later\n    @property\n    def with_semantic(self) -> bool:\n        \"\"\"bool: whether the head has semantic head\"\"\"\n        return hasattr(self,\n                       'semantic_head') and self.semantic_head is not None\n\n    def _bbox_forward(\n            self,\n            stage: int,\n            x: Tuple[Tensor],\n            rois: Tensor,\n            semantic_feat: Optional[Tensor] = None) -> Dict[str, Tensor]:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            semantic_feat (Tensor, optional): Semantic feature. 
Defaults to\n                None.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        if self.with_semantic and 'bbox' in self.semantic_fusion:\n            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:\n                bbox_semantic_feat = adaptive_avg_pool2d(\n                    bbox_semantic_feat, bbox_feats.shape[-2:])\n            bbox_feats += bbox_semantic_feat\n        cls_score, bbox_pred = bbox_head(bbox_feats)\n\n        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)\n        return bbox_results\n\n    def bbox_loss(self,\n                  stage: int,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  semantic_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Run forward function and calculate loss for box head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            semantic_feat (Tensor, optional): Semantic feature. Defaults to\n                None.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n                - `loss_bbox` (dict): A dictionary of bbox loss components.\n                - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n                  column indicates batch id of each RoI.\n                - `bbox_targets` (tuple):  Ground truth for proposals in a\n                  single image. 
Containing the following list of Tensors:\n                  (labels, label_weights, bbox_targets, bbox_weights)\n        \"\"\"\n        bbox_head = self.bbox_head[stage]\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(\n            stage, x, rois, semantic_feat=semantic_feat)\n        bbox_results.update(rois=rois)\n\n        bbox_loss_and_target = bbox_head.loss_and_target(\n            cls_score=bbox_results['cls_score'],\n            bbox_pred=bbox_results['bbox_pred'],\n            rois=rois,\n            sampling_results=sampling_results,\n            rcnn_train_cfg=self.train_cfg[stage])\n        bbox_results.update(bbox_loss_and_target)\n        return bbox_results\n\n    def _mask_forward(self,\n                      stage: int,\n                      x: Tuple[Tensor],\n                      rois: Tensor,\n                      semantic_feat: Optional[Tensor] = None,\n                      training: bool = True) -> Dict[str, Tensor]:\n        \"\"\"Mask head forward function used only in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            semantic_feat (Tensor, optional): Semantic feature. Defaults to\n                None.\n            training (bool): Mask Forward is different between training and\n                testing. If True, use the mask forward in training.\n                Defaults to True.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n        \"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        rois)\n\n        # semantic feature fusion\n        # element-wise sum for original features and pooled semantic features\n        if self.with_semantic and 'mask' in self.semantic_fusion:\n            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:\n                mask_semantic_feat = F.adaptive_avg_pool2d(\n                    mask_semantic_feat, mask_feats.shape[-2:])\n            mask_feats = mask_feats + mask_semantic_feat\n\n        # mask information flow\n        # forward all previous mask heads to obtain last_feat, and fuse it\n        # with the normal mask feature\n        if training:\n            if self.mask_info_flow:\n                last_feat = None\n                for i in range(stage):\n                    last_feat = self.mask_head[i](\n                        mask_feats, last_feat, return_logits=False)\n                mask_preds = mask_head(\n                    mask_feats, last_feat, return_feat=False)\n            else:\n                mask_preds = mask_head(mask_feats, return_feat=False)\n\n            mask_results = dict(mask_preds=mask_preds)\n        else:\n            aug_masks = []\n            last_feat = None\n            for i in range(self.num_stages):\n                mask_head = self.mask_head[i]\n                if self.mask_info_flow:\n                    mask_preds, last_feat = mask_head(mask_feats, last_feat)\n  
              else:\n                    mask_preds = mask_head(mask_feats)\n                aug_masks.append(mask_preds)\n\n            mask_results = dict(mask_preds=aug_masks)\n\n        return mask_results\n\n    def mask_loss(self,\n                  stage: int,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  batch_gt_instances: InstanceList,\n                  semantic_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[:obj:`SamplingResult`]): Sampling results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            semantic_feat (Tensor, optional): Semantic feature. Defaults to\n                None.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n        mask_results = self._mask_forward(\n            stage=stage,\n            x=x,\n            rois=pos_rois,\n            semantic_feat=semantic_feat,\n            training=True)\n\n        mask_head = self.mask_head[stage]\n        mask_loss_and_target = mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg[stage])\n        mask_results.update(mask_loss_and_target)\n\n        return mask_results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        # semantic segmentation part\n        # 2 outputs: segmentation prediction and embedded features\n        losses = dict()\n        if self.with_semantic:\n            gt_semantic_segs = [\n                data_sample.gt_sem_seg.sem_seg\n                for data_sample in batch_data_samples\n            ]\n            gt_semantic_segs = torch.stack(gt_semantic_segs)\n            semantic_pred, semantic_feat = self.semantic_head(x)\n            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs)\n            losses['loss_semantic_seg'] = loss_seg\n        else:\n            semantic_feat = None\n\n        results_list = rpn_results_list\n        num_imgs = len(batch_img_metas)\n        for stage in range(self.num_stages):\n            self.current_stage = stage\n\n            stage_loss_weight = self.stage_loss_weights[stage]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            bbox_assigner = self.bbox_assigner[stage]\n            bbox_sampler = self.bbox_sampler[stage]\n            for i in range(num_imgs):\n                results = results_list[i]\n                # rename rpn_results.bboxes to rpn_results.priors\n                if 'bboxes' in results:\n                    results.priors = results.pop('bboxes')\n\n                assign_result = bbox_assigner.assign(\n                    results, batch_gt_instances[i],\n                    batch_gt_instances_ignore[i])\n                sampling_result = bbox_sampler.sample(\n                    assign_result,\n                    results,\n                    batch_gt_instances[i],\n                    feats=[lvl_feat[i][None] for lvl_feat in x])\n                sampling_results.append(sampling_result)\n\n            # bbox head forward and loss\n            bbox_results = self.bbox_loss(\n                stage=stage,\n                x=x,\n                sampling_results=sampling_results,\n                semantic_feat=semantic_feat)\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{stage}.{name}'] = (\n                    value * stage_loss_weight if 'loss' in name else value)\n\n            # mask head forward and loss\n            if self.with_mask:\n                # interleaved execution: use regressed bboxes by the box branch\n                # to train the mask branch\n                if self.interleaved:\n                    bbox_head = self.bbox_head[stage]\n                    with torch.no_grad():\n                        results_list = bbox_head.refine_bboxes(\n                            sampling_results, bbox_results, batch_img_metas)\n                        # re-assign and sample 512 RoIs from 512 RoIs\n                        sampling_results = []\n                        for i in range(num_imgs):\n                            results = results_list[i]\n                            # rename rpn_results.bboxes to rpn_results.priors\n                            results.priors = results.pop('bboxes')\n                            assign_result = bbox_assigner.assign(\n                 
               results, batch_gt_instances[i],\n                                batch_gt_instances_ignore[i])\n                            sampling_result = bbox_sampler.sample(\n                                assign_result,\n                                results,\n                                batch_gt_instances[i],\n                                feats=[lvl_feat[i][None] for lvl_feat in x])\n                            sampling_results.append(sampling_result)\n                mask_results = self.mask_loss(\n                    stage=stage,\n                    x=x,\n                    sampling_results=sampling_results,\n                    batch_gt_instances=batch_gt_instances,\n                    semantic_feat=semantic_feat)\n                for name, value in mask_results['loss_mask'].items():\n                    losses[f's{stage}.{name}'] = (\n                        value * stage_loss_weight if 'loss' in name else value)\n\n            # refine bboxes (same as Cascade R-CNN)\n            if stage < self.num_stages - 1 and not self.interleaved:\n                bbox_head = self.bbox_head[stage]\n                with torch.no_grad():\n                    results_list = bbox_head.refine_bboxes(\n                        sampling_results=sampling_results,\n                        bbox_results=bbox_results,\n                        batch_img_metas=batch_img_metas)\n\n        return losses\n\n    def predict(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the roi head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (N, C, H, W).\n            rpn_results_list (list[:obj:`InstanceData`]): list of region\n                proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results to\n                the original image. 
Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        # TODO: nms_op in mmcv need be enhanced, the bbox result may get\n        #  difference when not rescale in bbox_head\n\n        # If it has the mask branch, the bbox branch does not need\n        # to be scaled to the original image scale, because the mask\n        # branch will scale both bbox and mask at the same time.\n        bbox_rescale = rescale if not self.with_mask else False\n        results_list = self.predict_bbox(\n            x=x,\n            semantic_feat=semantic_feat,\n            batch_img_metas=batch_img_metas,\n            rpn_results_list=rpn_results_list,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=bbox_rescale)\n\n        if self.with_mask:\n            results_list = self.predict_mask(\n                x=x,\n                semantic_heat=semantic_feat,\n                batch_img_metas=batch_img_metas,\n                results_list=results_list,\n                rescale=rescale)\n\n        return results_list\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     semantic_heat: Tensor,\n                     batch_img_metas: List[dict],\n                     results_list: InstanceList,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            semantic_feat (Tensor): Semantic feature.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            
results_list = empty_instances(\n                batch_img_metas=batch_img_metas,\n                device=mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        mask_results = self._mask_forward(\n            stage=-1,\n            x=x,\n            rois=mask_rois,\n            semantic_feat=semantic_heat,\n            training=False)\n        # split batch mask prediction back to each image\n        aug_masks = [[\n            mask.sigmoid().detach()\n            for mask in mask_preds.split(num_mask_rois_per_img, 0)\n        ] for mask_preds in mask_results['mask_preds']]\n\n        merged_masks = []\n        for i in range(num_imgs):\n            aug_mask = [mask[i] for mask in aug_masks]\n            merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n            merged_masks.append(merged_mask)\n\n        results_list = self.mask_head[-1].predict_by_feat(\n            mask_preds=merged_masks,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale,\n            activate_map=True)\n\n        return results_list\n\n    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n                batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n        num_imgs = len(batch_img_metas)\n\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            rois, cls_scores, bbox_preds = self._refine_roi(\n                x=x,\n                rois=rois,\n                semantic_feat=semantic_feat,\n                batch_img_metas=batch_img_metas,\n                num_proposals_per_img=num_proposals_per_img)\n            results = results + (cls_scores, bbox_preds)\n        # mask head\n        if self.with_mask:\n            rois = torch.cat(rois)\n            mask_results = self._mask_forward(\n                stage=-1,\n                x=x,\n                rois=rois,\n                semantic_feat=semantic_feat,\n                training=False)\n            aug_masks = [[\n                mask.sigmoid().detach()\n                for mask in mask_preds.split(num_proposals_per_img, 0)\n            ] for mask_preds in mask_results['mask_preds']]\n\n            
merged_masks = []\n            for i in range(num_imgs):\n                aug_mask = [mask[i] for mask in aug_masks]\n                merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n                merged_masks.append(merged_mask)\n            results = results + (merged_masks, )\n        return results\n"
  },
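  {
    "path": "examples/htc_mask_ensemble_sketch.py",
    "content": "# Hypothetical sketch, not an mmdetection file: path and function name are\n# illustrative. It mimics, with plain torch, the test-time mask ensembling in\n# HybridTaskCascadeRoIHead.predict_mask above: every cascade stage predicts a\n# mask for the same RoIs, the sigmoid maps are grouped per image, and\n# merge_aug_masks fuses them. The simple mean below is an assumption standing\n# in for merge_aug_masks.\nimport torch\n\n\ndef average_stage_masks(stage_preds):\n    # stage_preds: one (num_rois, num_classes, H, W) sigmoid map per stage.\n    return torch.stack(stage_preds, dim=0).mean(dim=0)\n\n\nif __name__ == '__main__':\n    preds = [torch.rand(5, 80, 28, 28) for _ in range(3)]  # 3 HTC stages\n    merged = average_stage_masks(preds)\n    assert merged.shape == (5, 80, 28, 28)\n"
  },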
  {
    "path": "mmdet/models/roi_heads/mask_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coarse_mask_head import CoarseMaskHead\nfrom .dynamic_mask_head import DynamicMaskHead\nfrom .fcn_mask_head import FCNMaskHead\nfrom .feature_relay_head import FeatureRelayHead\nfrom .fused_semantic_head import FusedSemanticHead\nfrom .global_context_head import GlobalContextHead\nfrom .grid_head import GridHead\nfrom .htc_mask_head import HTCMaskHead\nfrom .mask_point_head import MaskPointHead\nfrom .maskiou_head import MaskIoUHead\nfrom .scnet_mask_head import SCNetMaskHead\nfrom .scnet_semantic_head import SCNetSemanticHead\n\n__all__ = [\n    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',\n    'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',\n    'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',\n    'DynamicMaskHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/coarse_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import ConvModule, Linear\nfrom mmengine.model import ModuleList\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@MODELS.register_module()\nclass CoarseMaskHead(FCNMaskHead):\n    \"\"\"Coarse mask head used in PointRend.\n\n    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample\n    the input feature map instead of upsample it.\n\n    Args:\n        num_convs (int): Number of conv layers in the head. Defaults to 0.\n        num_fcs (int): Number of fc layers in the head. Defaults to 2.\n        fc_out_channels (int): Number of output channels of fc layer.\n            Defaults to 1024.\n        downsample_factor (int): The factor that feature map is downsampled by.\n            Defaults to 2.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs: int = 0,\n                 num_fcs: int = 2,\n                 fc_out_channels: int = 1024,\n                 downsample_factor: int = 2,\n                 init_cfg: MultiConfig = dict(\n                     type='Xavier',\n                     override=[\n                         dict(name='fcs'),\n                         dict(type='Constant', val=0.001, name='fc_logits')\n                     ]),\n                 *arg,\n                 **kwarg) -> None:\n        super().__init__(\n            *arg,\n            num_convs=num_convs,\n            upsample_cfg=dict(type=None),\n            init_cfg=None,\n            **kwarg)\n        self.init_cfg = init_cfg\n        self.num_fcs = num_fcs\n        assert self.num_fcs > 0\n        self.fc_out_channels = fc_out_channels\n        self.downsample_factor = downsample_factor\n        assert self.downsample_factor >= 1\n        # remove conv_logit\n        delattr(self, 'conv_logits')\n\n        if downsample_factor > 1:\n            downsample_in_channels = (\n                self.conv_out_channels\n                if self.num_convs > 0 else self.in_channels)\n            self.downsample_conv = ConvModule(\n                downsample_in_channels,\n                self.conv_out_channels,\n                kernel_size=downsample_factor,\n                stride=downsample_factor,\n                padding=0,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n        else:\n            self.downsample_conv = None\n\n        self.output_size = (self.roi_feat_size[0] // downsample_factor,\n                            self.roi_feat_size[1] // downsample_factor)\n        self.output_area = self.output_size[0] * self.output_size[1]\n\n        last_layer_dim = self.conv_out_channels * self.output_area\n\n        self.fcs = ModuleList()\n        for i in range(num_fcs):\n            fc_in_channels = (\n                last_layer_dim if i == 0 else self.fc_out_channels)\n            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))\n        last_layer_dim = self.fc_out_channels\n        output_channels = self.num_classes * self.output_area\n        self.fc_logits = Linear(last_layer_dim, output_channels)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights.\"\"\"\n        super(FCNMaskHead, self).init_weights()\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (Tensor): Extract 
mask RoI features.\n\n        Returns:\n            Tensor: Predicted foreground masks.\n        \"\"\"\n        for conv in self.convs:\n            x = conv(x)\n\n        if self.downsample_conv is not None:\n            x = self.downsample_conv(x)\n\n        x = x.flatten(1)\n        for fc in self.fcs:\n            x = self.relu(fc(x))\n        mask_preds = self.fc_logits(x).view(\n            x.size(0), self.num_classes, *self.output_size)\n        return mask_preds\n"
  },
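  {
    "path": "examples/coarse_mask_head_geometry_sketch.py",
    "content": "# Hypothetical sketch, not an mmdetection file; the numbers simply work\n# through the output geometry of CoarseMaskHead above. With roi_feat_size=14\n# and downsample_factor=2 the head predicts on a 7x7 grid, so fc_logits emits\n# num_classes * 49 values per RoI, later reshaped to\n# (num_rois, num_classes, 7, 7) in forward().\nroi_feat_size = 14\ndownsample_factor = 2\nnum_classes = 80\n\noutput_size = (roi_feat_size // downsample_factor,\n               roi_feat_size // downsample_factor)\noutput_area = output_size[0] * output_size[1]\nfc_logits_out = num_classes * output_area\n\nprint(output_size)    # (7, 7)\nprint(fc_logits_out)  # 80 * 49 = 3920\n"
  },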
  {
    "path": "mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.config import ConfigDict\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules import SamplingResult\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType, reduce_mean\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@MODELS.register_module()\nclass DynamicMaskHead(FCNMaskHead):\n    r\"\"\"Dynamic Mask Head for\n    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\n\n    Args:\n        num_convs (int): Number of convolution layer.\n            Defaults to 4.\n        roi_feat_size (int): The output size of RoI extractor,\n            Defaults to 14.\n        in_channels (int): Input feature channels.\n            Defaults to 256.\n        conv_kernel_size (int): Kernel size of convolution layers.\n            Defaults to 3.\n        conv_out_channels (int): Output channels of convolution layers.\n            Defaults to 256.\n        num_classes (int): Number of classes.\n            Defaults to 80\n        class_agnostic (int): Whether generate class agnostic prediction.\n            Defaults to False.\n        dropout (float): Probability of drop the channel.\n            Defaults to 0.0\n        upsample_cfg (:obj:`ConfigDict` or dict): The config for\n            upsample layer.\n        conv_cfg (:obj:`ConfigDict` or dict, optional): The convolution\n            layer config.\n        norm_cfg (:obj:`ConfigDict` or dict, optional): The norm layer config.\n        dynamic_conv_cfg (:obj:`ConfigDict` or dict): The dynamic convolution\n            layer config.\n        loss_mask (:obj:`ConfigDict` or dict): The config for mask loss.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs: int = 4,\n                 roi_feat_size: int = 14,\n                 in_channels: int = 256,\n                 conv_kernel_size: int = 3,\n                 conv_out_channels: int = 256,\n                 num_classes: int = 80,\n                 class_agnostic: bool = False,\n                 upsample_cfg: ConfigType = dict(\n                     type='deconv', scale_factor=2),\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 dynamic_conv_cfg: ConfigType = dict(\n                     type='DynamicConv',\n                     in_channels=256,\n                     feat_channels=64,\n                     out_channels=256,\n                     input_feat_shape=14,\n                     with_proj=False,\n                     act_cfg=dict(type='ReLU', inplace=True),\n                     norm_cfg=dict(type='LN')),\n                 loss_mask: ConfigType = dict(\n                     type='DiceLoss', loss_weight=8.0),\n                 **kwargs) -> None:\n        super().__init__(\n            num_convs=num_convs,\n            roi_feat_size=roi_feat_size,\n            in_channels=in_channels,\n            conv_kernel_size=conv_kernel_size,\n            conv_out_channels=conv_out_channels,\n            num_classes=num_classes,\n            class_agnostic=class_agnostic,\n            upsample_cfg=upsample_cfg,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            loss_mask=loss_mask,\n            **kwargs)\n        assert class_agnostic is False, \\\n            'DynamicMaskHead only support class_agnostic=False'\n        self.fp16_enabled = False\n\n        self.instance_interactive_conv = 
MODELS.build(dynamic_conv_cfg)\n\n    def init_weights(self) -> None:\n        \"\"\"Use xavier initialization for all weight parameter and set\n        classification head bias as a specific value when use focal loss.\"\"\"\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n            nn.init.constant_(self.conv_logits.bias, 0.)\n\n    def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> Tensor:\n        \"\"\"Forward function of DynamicMaskHead.\n\n        Args:\n            roi_feat (Tensor): Roi-pooling features with shape\n                (batch_size*num_proposals, feature_dimensions,\n                pooling_h , pooling_w).\n            proposal_feat (Tensor): Intermediate feature get from\n                diihead in last stage, has shape\n                (batch_size*num_proposals, feature_dimensions)\n\n          Returns:\n            mask_preds (Tensor): Predicted foreground masks with shape\n            (batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2).\n        \"\"\"\n\n        proposal_feat = proposal_feat.reshape(-1, self.in_channels)\n        proposal_feat_iic = self.instance_interactive_conv(\n            proposal_feat, roi_feat)\n\n        x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())\n\n        for conv in self.convs:\n            x = conv(x)\n        if self.upsample is not None:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n        mask_preds = self.conv_logits(x)\n        return mask_preds\n\n    def loss_and_target(self, mask_preds: Tensor,\n                        sampling_results: List[SamplingResult],\n                        batch_gt_instances: InstanceList,\n                        rcnn_train_cfg: ConfigDict) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mask_preds (Tensor): Predicted foreground masks, has shape\n                (num_pos, num_classes, h, w).\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n        \"\"\"\n        mask_targets = self.get_targets(\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=rcnn_train_cfg)\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n        num_pos = pos_labels.new_ones(pos_labels.size()).float().sum()\n        avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item()\n        loss = dict()\n        if mask_preds.size(0) == 0:\n            loss_mask = mask_preds.sum()\n        else:\n            loss_mask = self.loss_mask(\n                mask_preds[torch.arange(num_pos).long(), pos_labels,\n                           ...].sigmoid(),\n                mask_targets,\n                avg_factor=avg_factor)\n        loss['loss_mask'] = loss_mask\n        return dict(loss_mask=loss, mask_targets=mask_targets)\n"
  },
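  {
    "path": "examples/dynamic_mask_head_reshape_sketch.py",
    "content": "# Hypothetical sketch, not an mmdetection file: shapes are dummies. It traces\n# the tensor layout change in DynamicMaskHead.forward above. The dynamic\n# convolution returns per-RoI features laid out as (num_rois, H*W, C);\n# permuting to (num_rois, C, H*W) and reshaping recovers the spatial RoI\n# layout expected by the following conv tower.\nimport torch\n\nnum_rois, C, H, W = 4, 256, 14, 14\nroi_feat = torch.randn(num_rois, C, H, W)\n\n# stand-in for the output of `instance_interactive_conv`\nproposal_feat_iic = torch.randn(num_rois, H * W, C)\n\nx = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())\nassert x.shape == (num_rois, C, H, W)\n"
  },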
  {
    "path": "mmdet/models/roi_heads/mask_heads/fcn_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule, ModuleList\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.utils import empty_instances\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.mask import mask_target\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig\n\nBYTES_PER_FLOAT = 4\n# TODO: This memory limit may be too much or too little. It would be better to\n#  determine it based on available resources.\nGPU_MEM_LIMIT = 1024**3  # 1 GB memory limit\n\n\n@MODELS.register_module()\nclass FCNMaskHead(BaseModule):\n\n    def __init__(self,\n                 num_convs: int = 4,\n                 roi_feat_size: int = 14,\n                 in_channels: int = 256,\n                 conv_kernel_size: int = 3,\n                 conv_out_channels: int = 256,\n                 num_classes: int = 80,\n                 class_agnostic: int = False,\n                 upsample_cfg: ConfigType = dict(\n                     type='deconv', scale_factor=2),\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: OptConfigType = None,\n                 predictor_cfg: ConfigType = dict(type='Conv'),\n                 loss_mask: ConfigType = dict(\n                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n                 init_cfg: OptMultiConfig = None) -> None:\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg)\n        self.upsample_cfg = upsample_cfg.copy()\n        if self.upsample_cfg['type'] not in [\n                None, 'deconv', 'nearest', 'bilinear', 'carafe'\n        ]:\n            raise ValueError(\n                f'Invalid upsample method {self.upsample_cfg[\"type\"]}, '\n                'accepted methods are \"deconv\", \"nearest\", \"bilinear\", '\n                '\"carafe\"')\n        self.num_convs = num_convs\n        # WARN: roi_feat_size is reserved and not used\n        self.roi_feat_size = _pair(roi_feat_size)\n        self.in_channels = in_channels\n        self.conv_kernel_size = conv_kernel_size\n        self.conv_out_channels = conv_out_channels\n        self.upsample_method = self.upsample_cfg.get('type')\n        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)\n        self.num_classes = num_classes\n        self.class_agnostic = class_agnostic\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.predictor_cfg = predictor_cfg\n        self.loss_mask = MODELS.build(loss_mask)\n\n        self.convs = ModuleList()\n        for i in range(self.num_convs):\n            in_channels = (\n                self.in_channels if i == 0 else self.conv_out_channels)\n            padding = (self.conv_kernel_size - 1) // 2\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    self.conv_out_channels,\n                    self.conv_kernel_size,\n                    padding=padding,\n        
            conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg))\n        upsample_in_channels = (\n            self.conv_out_channels if self.num_convs > 0 else in_channels)\n        upsample_cfg_ = self.upsample_cfg.copy()\n        if self.upsample_method is None:\n            self.upsample = None\n        elif self.upsample_method == 'deconv':\n            upsample_cfg_.update(\n                in_channels=upsample_in_channels,\n                out_channels=self.conv_out_channels,\n                kernel_size=self.scale_factor,\n                stride=self.scale_factor)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n        elif self.upsample_method == 'carafe':\n            upsample_cfg_.update(\n                channels=upsample_in_channels, scale_factor=self.scale_factor)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n        else:\n            # suppress warnings\n            align_corners = (None\n                             if self.upsample_method == 'nearest' else False)\n            upsample_cfg_.update(\n                scale_factor=self.scale_factor,\n                mode=self.upsample_method,\n                align_corners=align_corners)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n\n        out_channels = 1 if self.class_agnostic else self.num_classes\n        logits_in_channel = (\n            self.conv_out_channels\n            if self.upsample_method == 'deconv' else upsample_in_channels)\n        self.conv_logits = build_conv_layer(self.predictor_cfg,\n                                            logits_in_channel, out_channels, 1)\n        self.relu = nn.ReLU(inplace=True)\n        self.debug_imgs = None\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize the weights.\"\"\"\n        super().init_weights()\n        for m in [self.upsample, self.conv_logits]:\n            if m is None:\n                continue\n            elif isinstance(m, CARAFEPack):\n                m.init_weights()\n            elif hasattr(m, 'weight') and hasattr(m, 'bias'):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_out', nonlinearity='relu')\n                nn.init.constant_(m.bias, 0)\n\n    def forward(self, x: Tensor) -> Tensor:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (Tensor): Extract mask RoI features.\n\n        Returns:\n            Tensor: Predicted foreground masks.\n        \"\"\"\n        for conv in self.convs:\n            x = conv(x)\n        if self.upsample is not None:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n        mask_preds = self.conv_logits(x)\n        return mask_preds\n\n    def get_targets(self, sampling_results: List[SamplingResult],\n                    batch_gt_instances: InstanceList,\n                    rcnn_train_cfg: ConfigDict) -> Tensor:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Args:\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n\n        Returns:\n            Tensor: Mask target of each positive proposals in the image.\n        \"\"\"\n        pos_proposals = [res.pos_priors for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        gt_masks = [res.masks for res in batch_gt_instances]\n        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n                                   gt_masks, rcnn_train_cfg)\n        return mask_targets\n\n    def loss_and_target(self, mask_preds: Tensor,\n                        sampling_results: List[SamplingResult],\n                        batch_gt_instances: InstanceList,\n                        rcnn_train_cfg: ConfigDict) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the mask head.\n\n        Args:\n            mask_preds (Tensor): Predicted foreground masks, has shape\n                (num_pos, num_classes, h, w).\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n        \"\"\"\n        mask_targets = self.get_targets(\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=rcnn_train_cfg)\n\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n        loss = dict()\n        if mask_preds.size(0) == 0:\n            loss_mask = mask_preds.sum()\n        else:\n            if self.class_agnostic:\n                loss_mask = self.loss_mask(mask_preds, mask_targets,\n                                           torch.zeros_like(pos_labels))\n            else:\n                loss_mask = self.loss_mask(mask_preds, mask_targets,\n                                           pos_labels)\n        loss['loss_mask'] = loss_mask\n        # TODO: which algorithm requires mask_targets?\n        return dict(loss_mask=loss, mask_targets=mask_targets)\n\n    def predict_by_feat(self,\n                        mask_preds: Tuple[Tensor],\n                        results_list: List[InstanceData],\n                        batch_img_metas: List[dict],\n                        rcnn_test_cfg: ConfigDict,\n                        rescale: bool = False,\n                        activate_map: bool = False) -> InstanceList:\n        \"\"\"Transform a batch of output features extracted from the head into\n        mask results.\n\n        Args:\n            mask_preds (tuple[Tensor]): Tuple of predicted foreground masks,\n                each has shape (n, num_classes, h, w).\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            batch_img_metas (list[dict]): List of image information.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            activate_map (book): Whether get results with augmentations test.\n     
           If True, the `mask_preds` will not process with sigmoid.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        assert len(mask_preds) == len(results_list) == len(batch_img_metas)\n\n        for img_id in range(len(batch_img_metas)):\n            img_meta = batch_img_metas[img_id]\n            results = results_list[img_id]\n            bboxes = results.bboxes\n            if bboxes.shape[0] == 0:\n                results_list[img_id] = empty_instances(\n                    [img_meta],\n                    bboxes.device,\n                    task_type='mask',\n                    instance_results=[results],\n                    mask_thr_binary=rcnn_test_cfg.mask_thr_binary)[0]\n            else:\n                im_mask = self._predict_by_feat_single(\n                    mask_preds=mask_preds[img_id],\n                    bboxes=bboxes,\n                    labels=results.labels,\n                    img_meta=img_meta,\n                    rcnn_test_cfg=rcnn_test_cfg,\n                    rescale=rescale,\n                    activate_map=activate_map)\n                results.masks = im_mask\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                mask_preds: Tensor,\n                                bboxes: Tensor,\n                                labels: Tensor,\n                                img_meta: dict,\n                                rcnn_test_cfg: ConfigDict,\n                                rescale: bool = False,\n                                activate_map: bool = False) -> Tensor:\n        \"\"\"Get segmentation masks from mask_preds and bboxes.\n\n        Args:\n            mask_preds (Tensor): Predicted foreground masks, has shape\n                (n, num_classes, h, w).\n            bboxes (Tensor): Predicted bboxes, has shape (n, 4)\n            labels (Tensor): Labels of bboxes, has shape (n, )\n            img_meta (dict): image information.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n                Defaults to None.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            activate_map (book): Whether get results with augmentations test.\n                If True, the `mask_preds` will not process with sigmoid.\n                Defaults to False.\n\n        Returns:\n            Tensor: Encoded masks, has shape (n, img_w, img_h)\n\n        Example:\n            >>> from mmengine.config import Config\n            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA\n            >>> N = 7  # N = number of extracted ROIs\n            >>> C, H, W = 11, 32, 32\n            >>> # Create example instance of FCN Mask Head.\n            >>> self = FCNMaskHead(num_classes=C, num_convs=0)\n            >>> inputs = torch.rand(N, self.in_channels, H, W)\n            >>> mask_preds = self.forward(inputs)\n            >>> # 
Each input is associated with some bounding box\n            >>> bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)\n            >>> labels = torch.randint(0, C, size=(N,))\n            >>> rcnn_test_cfg = Config({'mask_thr_binary': 0, })\n            >>> ori_shape = (H * 4, W * 4)\n            >>> scale_factor = (1, 1)\n            >>> rescale = False\n            >>> img_meta = {'scale_factor': scale_factor,\n            ...             'ori_shape': ori_shape}\n            >>> # Encoded masks are a list for each category.\n            >>> encoded_masks = self._get_seg_masks_single(\n            ...     mask_preds, bboxes, labels,\n            ...     img_meta, rcnn_test_cfg, rescale)\n            >>> assert encoded_masks.size()[0] == N\n            >>> assert encoded_masks.size()[1:] == ori_shape\n        \"\"\"\n        scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(\n            (1, 2))\n        img_h, img_w = img_meta['ori_shape'][:2]\n        device = bboxes.device\n\n        if not activate_map:\n            mask_preds = mask_preds.sigmoid()\n        else:\n            # In AugTest, has been activated before\n            mask_preds = bboxes.new_tensor(mask_preds)\n\n        if rescale:  # in-placed rescale the bboxes\n            bboxes /= scale_factor\n        else:\n            w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1]\n            img_h = np.round(img_h * h_scale.item()).astype(np.int32)\n            img_w = np.round(img_w * w_scale.item()).astype(np.int32)\n\n        N = len(mask_preds)\n        # The actual implementation split the input into chunks,\n        # and paste them chunk by chunk.\n        if device.type == 'cpu':\n            # CPU is most efficient when they are pasted one by one with\n            # skip_empty=True, so that it performs minimal number of\n            # operations.\n            num_chunks = N\n        else:\n            # GPU benefits from parallelism for larger chunks,\n            # but may have memory issue\n            # the types of img_w and img_h are np.int32,\n            # when the image resolution is large,\n            # the calculation of num_chunks will overflow.\n            # so we need to change the types of img_w and img_h to int.\n            # See https://github.com/open-mmlab/mmdetection/pull/5191\n            num_chunks = int(\n                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /\n                        GPU_MEM_LIMIT))\n            assert (num_chunks <=\n                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'\n        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)\n\n        threshold = rcnn_test_cfg.mask_thr_binary\n        im_mask = torch.zeros(\n            N,\n            img_h,\n            img_w,\n            device=device,\n            dtype=torch.bool if threshold >= 0 else torch.uint8)\n\n        if not self.class_agnostic:\n            mask_preds = mask_preds[range(N), labels][:, None]\n\n        for inds in chunks:\n            masks_chunk, spatial_inds = _do_paste_mask(\n                mask_preds[inds],\n                bboxes[inds],\n                img_h,\n                img_w,\n                skip_empty=device.type == 'cpu')\n\n            if threshold >= 0:\n                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)\n            else:\n                # for visualization and debugging\n                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)\n\n            im_mask[(inds, ) + spatial_inds] 
= masks_chunk\n        return im_mask\n\n\ndef _do_paste_mask(masks: Tensor,\n                   boxes: Tensor,\n                   img_h: int,\n                   img_w: int,\n                   skip_empty: bool = True) -> tuple:\n    \"\"\"Paste instance masks according to boxes.\n\n    This implementation is modified from\n    https://github.com/facebookresearch/detectron2/\n\n    Args:\n        masks (Tensor): N, 1, H, W\n        boxes (Tensor): N, 4\n        img_h (int): Height of the image to be pasted.\n        img_w (int): Width of the image to be pasted.\n        skip_empty (bool): Only paste masks within the region that\n            tightly bound all boxes, and returns the results this region only.\n            An important optimization for CPU.\n\n    Returns:\n        tuple: (Tensor, tuple). The first item is mask tensor, the second one\n        is the slice object.\n\n            If skip_empty == False, the whole image will be pasted. It will\n            return a mask of shape (N, img_h, img_w) and an empty tuple.\n\n            If skip_empty == True, only area around the mask will be pasted.\n            A mask of shape (N, h', w') and its start and end coordinates\n            in the original image will be returned.\n    \"\"\"\n    # On GPU, paste all masks together (up to chunk size)\n    # by using the entire image to sample the masks\n    # Compared to pasting them one by one,\n    # this has more operations but is faster on COCO-scale dataset.\n    device = masks.device\n    if skip_empty:\n        x0_int, y0_int = torch.clamp(\n            boxes.min(dim=0).values.floor()[:2] - 1,\n            min=0).to(dtype=torch.int32)\n        x1_int = torch.clamp(\n            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)\n        y1_int = torch.clamp(\n            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)\n    else:\n        x0_int, y0_int = 0, 0\n        x1_int, y1_int = img_w, img_h\n    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1\n\n    N = masks.shape[0]\n\n    img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5\n    img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5\n    img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n    img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n    # img_x, img_y have shapes (N, w), (N, h)\n    # IsInf op is not supported with ONNX<=1.7.0\n    if not torch.onnx.is_in_onnx_export():\n        if torch.isinf(img_x).any():\n            inds = torch.where(torch.isinf(img_x))\n            img_x[inds] = 0\n        if torch.isinf(img_y).any():\n            inds = torch.where(torch.isinf(img_y))\n            img_y[inds] = 0\n\n    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))\n    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))\n    grid = torch.stack([gx, gy], dim=3)\n\n    img_masks = F.grid_sample(\n        masks.to(dtype=torch.float32), grid, align_corners=False)\n\n    if skip_empty:\n        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))\n    else:\n        return img_masks[:, 0], ()\n"
  },
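The chunked pasting loop in the entry above ultimately calls `_do_paste_mask` once per chunk. Below is a minimal, illustrative sketch of that helper in isolation (not part of the repository); the import path follows the file layout shown here and the shapes are hypothetical.

```python
import torch

from mmdet.models.roi_heads.mask_heads.fcn_mask_head import _do_paste_mask

# Two 28x28 mask probability maps pasted onto a 100x120 image canvas.
masks = torch.rand(2, 1, 28, 28)                      # (N, 1, H, W)
boxes = torch.tensor([[10., 10., 50., 60.],
                      [30., 20., 90., 80.]])          # (N, 4) in image coordinates
img_masks, spatial_inds = _do_paste_mask(
    masks, boxes, img_h=100, img_w=120, skip_empty=False)
assert img_masks.shape == (2, 100, 120)
assert spatial_inds == ()  # whole-image pasting returns an empty slice tuple
```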
  {
    "path": "mmdet/models/roi_heads/mask_heads/feature_relay_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch.nn as nn\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig\n\n\n@MODELS.register_module()\nclass FeatureRelayHead(BaseModule):\n    \"\"\"Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        in_channels (int): number of input channels. Defaults to 256.\n        conv_out_channels (int): number of output channels before\n            classification layer. Defaults to 256.\n        roi_feat_size (int): roi feat size at box head. Default: 7.\n        scale_factor (int): scale factor to match roi feat size\n            at mask head. Defaults to 2.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict. Defaults to\n            dict(type='Kaiming', layer='Linear').\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int = 1024,\n        out_conv_channels: int = 256,\n        roi_feat_size: int = 7,\n        scale_factor: int = 2,\n        init_cfg: MultiConfig = dict(type='Kaiming', layer='Linear')\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        assert isinstance(roi_feat_size, int)\n\n        self.in_channels = in_channels\n        self.out_conv_channels = out_conv_channels\n        self.roi_feat_size = roi_feat_size\n        self.out_channels = (roi_feat_size**2) * out_conv_channels\n        self.scale_factor = scale_factor\n        self.fp16_enabled = False\n\n        self.fc = nn.Linear(self.in_channels, self.out_channels)\n        self.upsample = nn.Upsample(\n            scale_factor=scale_factor, mode='bilinear', align_corners=True)\n\n    def forward(self, x: Tensor) -> Optional[Tensor]:\n        \"\"\"Forward function.\n\n        Args:\n            x (Tensor): Input feature.\n\n        Returns:\n            Optional[Tensor]: Output feature. When the first dim of input is\n            0, None is returned.\n        \"\"\"\n        N, _ = x.shape\n        if N > 0:\n            out_C = self.out_conv_channels\n            out_HW = self.roi_feat_size\n            x = self.fc(x)\n            x = x.reshape(N, out_C, out_HW, out_HW)\n            x = self.upsample(x)\n            return x\n        return None\n"
  },
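A short, hypothetical usage sketch of the `FeatureRelayHead` defined above: it relays a flattened box-head feature back into a spatial map for the mask branch. Shapes follow the constructor defaults; the snippet is illustrative only, not repository code.

```python
import torch

from mmdet.models.roi_heads.mask_heads.feature_relay_head import FeatureRelayHead

head = FeatureRelayHead(in_channels=1024, out_conv_channels=256,
                        roi_feat_size=7, scale_factor=2)
x = torch.rand(4, 1024)                   # (num_rois, in_channels) fc feature
out = head(x)
assert out.shape == (4, 256, 14, 14)      # reshaped to 7x7, then upsampled 2x
assert head(torch.rand(0, 1024)) is None  # empty input yields None
```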
  {
    "path": "mmdet/models/roi_heads/mask_heads/fused_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import Tuple\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass FusedSemanticHead(BaseModule):\n    r\"\"\"Multi-level fused semantic segmentation head.\n\n    .. code-block:: none\n\n        in_1 -> 1x1 conv ---\n                            |\n        in_2 -> 1x1 conv -- |\n                           ||\n        in_3 -> 1x1 conv - ||\n                          |||                  /-> 1x1 conv (mask prediction)\n        in_4 -> 1x1 conv -----> 3x3 convs (*4)\n                            |                  \\-> 1x1 conv (feature)\n        in_5 -> 1x1 conv ---\n    \"\"\"  # noqa: W605\n\n    def __init__(\n        self,\n        num_ins: int,\n        fusion_level: int,\n        seg_scale_factor=1 / 8,\n        num_convs: int = 4,\n        in_channels: int = 256,\n        conv_out_channels: int = 256,\n        num_classes: int = 183,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        ignore_label: int = None,\n        loss_weight: float = None,\n        loss_seg: ConfigDict = dict(\n            type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),\n        init_cfg: MultiConfig = dict(\n            type='Kaiming', override=dict(name='conv_logits'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_ins = num_ins\n        self.fusion_level = fusion_level\n        self.seg_scale_factor = seg_scale_factor\n        self.num_convs = num_convs\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.num_classes = num_classes\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.fp16_enabled = False\n\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.num_ins):\n            self.lateral_convs.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    inplace=False))\n\n        self.convs = nn.ModuleList()\n        for i in range(self.num_convs):\n            in_channels = self.in_channels if i == 0 else conv_out_channels\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    conv_out_channels,\n                    3,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.conv_embedding = ConvModule(\n            conv_out_channels,\n            conv_out_channels,\n            1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg)\n        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)\n        if ignore_label:\n            loss_seg['ignore_index'] = ignore_label\n        if loss_weight:\n            loss_seg['loss_weight'] = loss_weight\n        if ignore_label or loss_weight:\n            warnings.warn('``ignore_label`` and ``loss_weight`` would be '\n                          'deprecated soon. 
Please set ``ignore_index`` and '\n                          '``loss_weight`` in ``loss_seg`` instead.')\n        self.criterion = MODELS.build(loss_seg)\n\n    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:\n        \"\"\"Forward function.\n\n        Args:\n            feats (tuple[Tensor]): Multi-scale feature maps.\n\n        Returns:\n            tuple[Tensor]:\n\n                - mask_preds (Tensor): Predicted mask logits.\n                - x (Tensor): Fused feature.\n        \"\"\"\n        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])\n        fused_size = tuple(x.shape[-2:])\n        for i, feat in enumerate(feats):\n            if i != self.fusion_level:\n                feat = F.interpolate(\n                    feat, size=fused_size, mode='bilinear', align_corners=True)\n                # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n                x = x + self.lateral_convs[i](feat)\n\n        for i in range(self.num_convs):\n            x = self.convs[i](x)\n\n        mask_preds = self.conv_logits(x)\n        x = self.conv_embedding(x)\n        return mask_preds, x\n\n    def loss(self, mask_preds: Tensor, labels: Tensor) -> Tensor:\n        \"\"\"Loss function.\n\n        Args:\n            mask_preds (Tensor): Predicted mask logits.\n            labels (Tensor): Ground truth.\n\n        Returns:\n            Tensor: Semantic segmentation loss.\n        \"\"\"\n        labels = F.interpolate(\n            labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')\n        labels = labels.squeeze(1).long()\n        loss_semantic_seg = self.criterion(mask_preds, labels)\n        return loss_semantic_seg\n"
  },
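A hypothetical forward pass through the `FusedSemanticHead` above, assuming a 5-level FPN-style input and that mmdet's default losses are importable so `loss_seg` can be built from the registry; the sizes are made up for illustration.

```python
import torch

from mmdet.models.roi_heads.mask_heads.fused_semantic_head import FusedSemanticHead

head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
# five pyramid levels, each with 256 channels and halved resolution
feats = tuple(torch.rand(2, 256, 64 // 2**i, 64 // 2**i) for i in range(5))
mask_preds, fused = head(feats)
assert mask_preds.shape == (2, 183, 32, 32)  # resolution of the fusion level
assert fused.shape == (2, 256, 32, 32)
```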
  {
    "path": "mmdet/models/roi_heads/mask_heads/global_context_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.models.layers import ResLayer, SimplifiedBasicBlock\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass GlobalContextHead(BaseModule):\n    \"\"\"Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        num_convs (int, optional): number of convolutional layer in GlbCtxHead.\n            Defaults to 4.\n        in_channels (int, optional): number of input channels. Defaults to 256.\n        conv_out_channels (int, optional): number of output channels before\n            classification layer. Defaults to 256.\n        num_classes (int, optional): number of classes. Defaults to 80.\n        loss_weight (float, optional): global context loss weight.\n            Defaults to 1.\n        conv_cfg (dict, optional): config to init conv layer. Defaults to None.\n        norm_cfg (dict, optional): config to init norm layer. Defaults to None.\n        conv_to_res (bool, optional): if True, 2 convs will be grouped into\n            1 `SimplifiedBasicBlock` using a skip connection.\n            Defaults to False.\n        init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n            list[:obj:`ConfigDict`]): Initialization config dict. Defaults to\n            dict(type='Normal', std=0.01, override=dict(name='fc')).\n    \"\"\"\n\n    def __init__(\n        self,\n        num_convs: int = 4,\n        in_channels: int = 256,\n        conv_out_channels: int = 256,\n        num_classes: int = 80,\n        loss_weight: float = 1.0,\n        conv_cfg: OptConfigType = None,\n        norm_cfg: OptConfigType = None,\n        conv_to_res: bool = False,\n        init_cfg: MultiConfig = dict(\n            type='Normal', std=0.01, override=dict(name='fc'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_convs = num_convs\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.num_classes = num_classes\n        self.loss_weight = loss_weight\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.conv_to_res = conv_to_res\n        self.fp16_enabled = False\n\n        if self.conv_to_res:\n            num_res_blocks = num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                in_channels,\n                self.conv_out_channels,\n                num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n            self.num_convs = num_res_blocks\n        else:\n            self.convs = nn.ModuleList()\n            for i in range(self.num_convs):\n                in_channels = self.in_channels if i == 0 else conv_out_channels\n                self.convs.append(\n                    ConvModule(\n                        in_channels,\n                        conv_out_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n\n        self.pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Linear(conv_out_channels, num_classes)\n\n        self.criterion = nn.BCEWithLogitsLoss()\n\n    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:\n        \"\"\"Forward 
function.\n\n        Args:\n            feats (Tuple[Tensor]): Multi-scale feature maps.\n\n        Returns:\n            Tuple[Tensor]:\n\n                - mc_pred (Tensor): Multi-class prediction.\n                - x (Tensor): Global context feature.\n        \"\"\"\n        x = feats[-1]\n        for i in range(self.num_convs):\n            x = self.convs[i](x)\n        x = self.pool(x)\n\n        # multi-class prediction\n        mc_pred = x.reshape(x.size(0), -1)\n        mc_pred = self.fc(mc_pred)\n\n        return mc_pred, x\n\n    def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:\n        \"\"\"Loss function.\n\n        Args:\n            pred (Tensor): Logits.\n            labels (list[Tensor]): Ground truths.\n\n        Returns:\n            Tensor: Loss.\n        \"\"\"\n        labels = [lbl.unique() for lbl in labels]\n        targets = pred.new_zeros(pred.size())\n        for i, label in enumerate(labels):\n            targets[i, label] = 1.0\n        loss = self.loss_weight * self.criterion(pred, targets)\n        return loss\n"
  },
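An illustrative sketch of the `GlobalContextHead` above: it pools the coarsest input level into a multi-label prediction, and its loss builds multi-hot targets from per-image label lists. Shapes and labels are hypothetical, not taken from the repository.

```python
import torch

from mmdet.models.roi_heads.mask_heads.global_context_head import GlobalContextHead

head = GlobalContextHead(num_convs=4, in_channels=256, num_classes=80)
feats = tuple(torch.rand(2, 256, 64 // 2**i, 64 // 2**i) for i in range(5))
mc_pred, ctx = head(feats)
assert mc_pred.shape == (2, 80)       # per-image multi-class logits
assert ctx.shape == (2, 256, 1, 1)    # pooled global context feature

labels = [torch.tensor([1, 1, 7]), torch.tensor([3])]  # per-image gt labels
loss = head.loss(mc_pred, labels)     # BCE against multi-hot targets
```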
  {
    "path": "mmdet/models/roi_heads/mask_heads/grid_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass GridHead(BaseModule):\n    \"\"\"Implementation of `Grid Head <https://arxiv.org/abs/1811.12030>`_\n\n    Args:\n        grid_points (int): The number of grid points. Defaults to 9.\n        num_convs (int): The number of convolution layers. Defaults to 8.\n        roi_feat_size (int): RoI feature size. Default to 14.\n        in_channels (int): The channel number of inputs features.\n            Defaults to 256.\n        conv_kernel_size (int): The kernel size of convolution layers.\n            Defaults to 3.\n        point_feat_channels (int): The number of channels of each point\n            features. Defaults to 64.\n        class_agnostic (bool): Whether use class agnostic classification.\n            If so, the output channels of logits will be 1. Defaults to False.\n        loss_grid (:obj:`ConfigDict` or dict): Config of grid loss.\n        conv_cfg (:obj:`ConfigDict` or dict, optional) dictionary to\n            construct and config conv layer.\n        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and\n            config norm layer.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        grid_points: int = 9,\n        num_convs: int = 8,\n        roi_feat_size: int = 14,\n        in_channels: int = 256,\n        conv_kernel_size: int = 3,\n        point_feat_channels: int = 64,\n        deconv_kernel_size: int = 4,\n        class_agnostic: bool = False,\n        loss_grid: ConfigType = dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15),\n        conv_cfg: OptConfigType = None,\n        norm_cfg: ConfigType = dict(type='GN', num_groups=36),\n        init_cfg: MultiConfig = [\n            dict(type='Kaiming', layer=['Conv2d', 'Linear']),\n            dict(\n                type='Normal',\n                layer='ConvTranspose2d',\n                std=0.001,\n                override=dict(\n                    type='Normal',\n                    name='deconv2',\n                    std=0.001,\n                    bias=-np.log(0.99 / 0.01)))\n        ]\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.grid_points = grid_points\n        self.num_convs = num_convs\n        self.roi_feat_size = roi_feat_size\n        self.in_channels = in_channels\n        self.conv_kernel_size = conv_kernel_size\n        self.point_feat_channels = point_feat_channels\n        self.conv_out_channels = self.point_feat_channels * self.grid_points\n        self.class_agnostic = class_agnostic\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':\n            assert self.conv_out_channels % norm_cfg['num_groups'] == 0\n\n        assert self.grid_points >= 4\n        self.grid_size = int(np.sqrt(self.grid_points))\n        if self.grid_size * 
self.grid_size != self.grid_points:\n            raise ValueError('grid_points must be a square number')\n\n        # the predicted heatmap is half of whole_map_size\n        if not isinstance(self.roi_feat_size, int):\n            raise ValueError('Only square RoIs are supporeted in Grid R-CNN')\n        self.whole_map_size = self.roi_feat_size * 4\n\n        # compute point-wise sub-regions\n        self.sub_regions = self.calc_sub_regions()\n\n        self.convs = []\n        for i in range(self.num_convs):\n            in_channels = (\n                self.in_channels if i == 0 else self.conv_out_channels)\n            stride = 2 if i == 0 else 1\n            padding = (self.conv_kernel_size - 1) // 2\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    self.conv_out_channels,\n                    self.conv_kernel_size,\n                    stride=stride,\n                    padding=padding,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=True))\n        self.convs = nn.Sequential(*self.convs)\n\n        self.deconv1 = nn.ConvTranspose2d(\n            self.conv_out_channels,\n            self.conv_out_channels,\n            kernel_size=deconv_kernel_size,\n            stride=2,\n            padding=(deconv_kernel_size - 2) // 2,\n            groups=grid_points)\n        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)\n        self.deconv2 = nn.ConvTranspose2d(\n            self.conv_out_channels,\n            grid_points,\n            kernel_size=deconv_kernel_size,\n            stride=2,\n            padding=(deconv_kernel_size - 2) // 2,\n            groups=grid_points)\n\n        # find the 4-neighbor of each grid point\n        self.neighbor_points = []\n        grid_size = self.grid_size\n        for i in range(grid_size):  # i-th column\n            for j in range(grid_size):  # j-th row\n                neighbors = []\n                if i > 0:  # left: (i - 1, j)\n                    neighbors.append((i - 1) * grid_size + j)\n                if j > 0:  # up: (i, j - 1)\n                    neighbors.append(i * grid_size + j - 1)\n                if j < grid_size - 1:  # down: (i, j + 1)\n                    neighbors.append(i * grid_size + j + 1)\n                if i < grid_size - 1:  # right: (i + 1, j)\n                    neighbors.append((i + 1) * grid_size + j)\n                self.neighbor_points.append(tuple(neighbors))\n        # total edges in the grid\n        self.num_edges = sum([len(p) for p in self.neighbor_points])\n\n        self.forder_trans = nn.ModuleList()  # first-order feature transition\n        self.sorder_trans = nn.ModuleList()  # second-order feature transition\n        for neighbors in self.neighbor_points:\n            fo_trans = nn.ModuleList()\n            so_trans = nn.ModuleList()\n            for _ in range(len(neighbors)):\n                # each transition module consists of a 5x5 depth-wise conv and\n                # 1x1 conv.\n                fo_trans.append(\n                    nn.Sequential(\n                        nn.Conv2d(\n                            self.point_feat_channels,\n                            self.point_feat_channels,\n                            5,\n                            stride=1,\n                            padding=2,\n                            groups=self.point_feat_channels),\n                        nn.Conv2d(self.point_feat_channels,\n                         
         self.point_feat_channels, 1)))\n                so_trans.append(\n                    nn.Sequential(\n                        nn.Conv2d(\n                            self.point_feat_channels,\n                            self.point_feat_channels,\n                            5,\n                            1,\n                            2,\n                            groups=self.point_feat_channels),\n                        nn.Conv2d(self.point_feat_channels,\n                                  self.point_feat_channels, 1)))\n            self.forder_trans.append(fo_trans)\n            self.sorder_trans.append(so_trans)\n\n        self.loss_grid = MODELS.build(loss_grid)\n\n    def forward(self, x: Tensor) -> Dict[str, Tensor]:\n        \"\"\"forward function of ``GridHead``.\n\n        Args:\n            x (Tensor): RoI features, has shape\n                (num_rois, num_channels, roi_feat_size, roi_feat_size).\n\n        Returns:\n            Dict[str, Tensor]: Return a dict including fused and unfused\n            heatmap.\n        \"\"\"\n        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size\n        # RoI feature transformation, downsample 2x\n        x = self.convs(x)\n\n        c = self.point_feat_channels\n        # first-order fusion\n        x_fo = [None for _ in range(self.grid_points)]\n        for i, points in enumerate(self.neighbor_points):\n            x_fo[i] = x[:, i * c:(i + 1) * c]\n            for j, point_idx in enumerate(points):\n                x_fo[i] = x_fo[i] + self.forder_trans[i][j](\n                    x[:, point_idx * c:(point_idx + 1) * c])\n\n        # second-order fusion\n        x_so = [None for _ in range(self.grid_points)]\n        for i, points in enumerate(self.neighbor_points):\n            x_so[i] = x[:, i * c:(i + 1) * c]\n            for j, point_idx in enumerate(points):\n                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])\n\n        # predicted heatmap with fused features\n        x2 = torch.cat(x_so, dim=1)\n        x2 = self.deconv1(x2)\n        x2 = F.relu(self.norm1(x2), inplace=True)\n        heatmap = self.deconv2(x2)\n\n        # predicted heatmap with original features (applicable during training)\n        if self.training:\n            x1 = x\n            x1 = self.deconv1(x1)\n            x1 = F.relu(self.norm1(x1), inplace=True)\n            heatmap_unfused = self.deconv2(x1)\n        else:\n            heatmap_unfused = heatmap\n\n        return dict(fused=heatmap, unfused=heatmap_unfused)\n\n    def calc_sub_regions(self) -> List[Tuple[float]]:\n        \"\"\"Compute point specific representation regions.\n\n        See `Grid R-CNN Plus <https://arxiv.org/abs/1906.05688>`_ for details.\n        \"\"\"\n        # to make it consistent with the original implementation, half_size\n        # is computed as 2 * quarter_size, which is smaller\n        half_size = self.whole_map_size // 4 * 2\n        sub_regions = []\n        for i in range(self.grid_points):\n            x_idx = i // self.grid_size\n            y_idx = i % self.grid_size\n            if x_idx == 0:\n                sub_x1 = 0\n            elif x_idx == self.grid_size - 1:\n                sub_x1 = half_size\n            else:\n                ratio = x_idx / (self.grid_size - 1) - 0.25\n                sub_x1 = max(int(ratio * self.whole_map_size), 0)\n\n            if y_idx == 0:\n                sub_y1 = 0\n            elif y_idx == self.grid_size - 1:\n                sub_y1 = half_size\n            else:\n                
ratio = y_idx / (self.grid_size - 1) - 0.25\n                sub_y1 = max(int(ratio * self.whole_map_size), 0)\n            sub_regions.append(\n                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))\n        return sub_regions\n\n    def get_targets(self, sampling_results: List[SamplingResult],\n                    rcnn_train_cfg: ConfigDict) -> Tensor:\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\".\n\n        Args:\n            sampling_results (List[:obj:`SamplingResult`]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN.\n\n        Returns:\n            Tensor: Grid heatmap targets.\n        \"\"\"\n        # mix all samples (across images) together.\n        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],\n                               dim=0).cpu()\n        pos_gt_bboxes = torch.cat(\n            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()\n        assert pos_bboxes.shape == pos_gt_bboxes.shape\n\n        # expand pos_bboxes to 2x of original size\n        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2\n        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2\n        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2\n        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2\n        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)\n        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)\n\n        num_rois = pos_bboxes.shape[0]\n        map_size = self.whole_map_size\n        # this is not the final target shape\n        targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),\n                              dtype=torch.float)\n\n        # pre-compute interpolation factors for all grid points.\n        # the first item is the factor of x-dim, and the second is y-dim.\n        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)\n        factors = []\n        for j in range(self.grid_points):\n            x_idx = j // self.grid_size\n            y_idx = j % self.grid_size\n            factors.append((1 - x_idx / (self.grid_size - 1),\n                            1 - y_idx / (self.grid_size - 1)))\n\n        radius = rcnn_train_cfg.pos_radius\n        radius2 = radius**2\n        for i in range(num_rois):\n            # ignore small bboxes\n            if (pos_bbox_ws[i] <= self.grid_size\n                    or pos_bbox_hs[i] <= self.grid_size):\n                continue\n            # for each grid point, mark a small circle as positive\n            for j in range(self.grid_points):\n                factor_x, factor_y = factors[j]\n                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (\n                    1 - factor_x) * pos_gt_bboxes[i, 2]\n                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (\n                    1 - factor_y) * pos_gt_bboxes[i, 3]\n\n                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *\n                         map_size)\n                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *\n                         map_size)\n\n                for x in range(cx - radius, cx + radius + 1):\n                    for y in range(cy - radius, cy + radius + 1):\n                        if x >= 0 and x < map_size 
and y >= 0 and y < map_size:\n                            if (x - cx)**2 + (y - cy)**2 <= radius2:\n                                targets[i, j, y, x] = 1\n        # reduce the target heatmap size by a half\n        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).\n        sub_targets = []\n        for i in range(self.grid_points):\n            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]\n            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])\n        sub_targets = torch.cat(sub_targets, dim=1)\n        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)\n        return sub_targets\n\n    def loss(self, grid_pred: Tensor, sample_idx: Tensor,\n             sampling_results: List[SamplingResult],\n             rcnn_train_cfg: ConfigDict) -> dict:\n        \"\"\"Calculate the loss based on the features extracted by the grid head.\n\n        Args:\n            grid_pred (dict[str, Tensor]): Outputs of grid_head forward.\n            sample_idx (Tensor): The sampling index of ``grid_pred``.\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n        \"\"\"\n        grid_targets = self.get_targets(sampling_results, rcnn_train_cfg)\n        grid_targets = grid_targets[sample_idx]\n\n        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)\n        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)\n        loss_grid = loss_fused + loss_unfused\n        return dict(loss_grid=loss_grid)\n\n    def predict_by_feat(self,\n                        grid_preds: Dict[str, Tensor],\n                        results_list: List[InstanceData],\n                        batch_img_metas: List[dict],\n                        rescale: bool = False) -> InstanceList:\n        \"\"\"Adjust the predicted bboxes from bbox head.\n\n        Args:\n            grid_preds (dict[str, Tensor]): dictionary outputted by forward\n                function.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            batch_img_metas (list[dict]): List of image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process. 
Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape \\\n            (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4), the last \\\n            dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        num_roi_per_img = tuple(res.bboxes.size(0) for res in results_list)\n        grid_preds = {\n            k: v.split(num_roi_per_img, 0)\n            for k, v in grid_preds.items()\n        }\n\n        for i, results in enumerate(results_list):\n            if len(results) != 0:\n                bboxes = self._predict_by_feat_single(\n                    grid_pred=grid_preds['fused'][i],\n                    bboxes=results.bboxes,\n                    img_meta=batch_img_metas[i],\n                    rescale=rescale)\n                results.bboxes = bboxes\n        return results_list\n\n    def _predict_by_feat_single(self,\n                                grid_pred: Tensor,\n                                bboxes: Tensor,\n                                img_meta: dict,\n                                rescale: bool = False) -> Tensor:\n        \"\"\"Adjust ``bboxes`` according to ``grid_pred``.\n\n        Args:\n            grid_pred (Tensor): Grid fused heatmap.\n            bboxes (Tensor): Predicted bboxes, has shape (n, 4)\n            img_meta (dict): image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            Tensor: adjusted bboxes.\n        \"\"\"\n        assert bboxes.size(0) == grid_pred.size(0)\n        grid_pred = grid_pred.sigmoid()\n\n        R, c, h, w = grid_pred.shape\n        half_size = self.whole_map_size // 4 * 2\n        assert h == w == half_size\n        assert c == self.grid_points\n\n        # find the point with max scores in the half-sized heatmap\n        grid_pred = grid_pred.view(R * c, h * w)\n        pred_scores, pred_position = grid_pred.max(dim=1)\n        xs = pred_position % w\n        ys = pred_position // w\n\n        # get the position in the whole heatmap instead of half-sized heatmap\n        for i in range(self.grid_points):\n            xs[i::self.grid_points] += self.sub_regions[i][0]\n            ys[i::self.grid_points] += self.sub_regions[i][1]\n\n        # reshape to (num_rois, grid_points)\n        pred_scores, xs, ys = tuple(\n            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))\n\n        # get expanded pos_bboxes\n        widths = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(-1)\n        heights = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(-1)\n        x1 = (bboxes[:, 0, None] - widths / 2)\n        y1 = (bboxes[:, 1, None] - heights / 2)\n        # map the grid point to the absolute coordinates\n        abs_xs = (xs.float() + 0.5) / w * widths + x1\n        abs_ys = (ys.float() + 0.5) / h * heights + y1\n\n        # get the grid points indices that fall on the bbox boundaries\n        x1_inds = [i for i in range(self.grid_size)]\n        y1_inds = [i * self.grid_size for i in range(self.grid_size)]\n        x2_inds = [\n            self.grid_points - self.grid_size + i\n            for i in range(self.grid_size)\n        ]\n        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]\n\n        # voting of all grid points on some boundary\n        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(\n            dim=1, 
keepdim=True) / (\n                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))\n        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))\n        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))\n        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))\n\n        bboxes = torch.cat([bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2], dim=1)\n        bboxes[:, [0, 2]].clamp_(min=0, max=img_meta['img_shape'][1])\n        bboxes[:, [1, 3]].clamp_(min=0, max=img_meta['img_shape'][0])\n\n        if rescale:\n            assert img_meta.get('scale_factor') is not None\n            bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(\n                (1, 2))\n\n        return bboxes\n"
  },
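A minimal inference sketch for the `GridHead` above, assuming the default 9-point configuration; it only checks that the predicted heatmaps come out at half of `whole_map_size` and that the fused and unfused outputs coincide outside training. Illustrative only.

```python
import torch

from mmdet.models.roi_heads.mask_heads.grid_head import GridHead

head = GridHead(grid_points=9, roi_feat_size=14, in_channels=256).eval()
roi_feats = torch.rand(3, 256, 14, 14)  # (num_rois, C, roi_feat_size, roi_feat_size)
with torch.no_grad():
    heatmaps = head(roi_feats)
assert heatmaps['fused'].shape == (3, 9, 28, 28)            # whole_map_size // 2
assert torch.equal(heatmaps['fused'], heatmaps['unfused'])  # shared at test time
```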
  {
    "path": "mmdet/models/roi_heads/mask_heads/htc_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Union\n\nfrom mmcv.cnn import ConvModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@MODELS.register_module()\nclass HTCMaskHead(FCNMaskHead):\n    \"\"\"Mask head for HTC.\n\n    Args:\n        with_conv_res (bool): Whether add conv layer for ``res_feat``.\n            Defaults to True.\n    \"\"\"\n\n    def __init__(self, with_conv_res: bool = True, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.with_conv_res = with_conv_res\n        if self.with_conv_res:\n            self.conv_res = ConvModule(\n                self.conv_out_channels,\n                self.conv_out_channels,\n                1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n\n    def forward(self,\n                x: Tensor,\n                res_feat: Optional[Tensor] = None,\n                return_logits: bool = True,\n                return_feat: bool = True) -> Union[Tensor, List[Tensor]]:\n        \"\"\"\n        Args:\n            x (Tensor): Feature map.\n            res_feat (Tensor, optional): Feature for residual connection.\n                Defaults to None.\n            return_logits (bool): Whether return mask logits. Defaults to True.\n            return_feat (bool): Whether return feature map. Defaults to True.\n\n        Returns:\n            Union[Tensor, List[Tensor]]: The return result is one of three\n                results: res_feat, logits, or [logits, res_feat].\n        \"\"\"\n        assert not (not return_logits and not return_feat)\n        if res_feat is not None:\n            assert self.with_conv_res\n            res_feat = self.conv_res(res_feat)\n            x = x + res_feat\n        for conv in self.convs:\n            x = conv(x)\n        res_feat = x\n        outs = []\n        if return_logits:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n            mask_preds = self.conv_logits(x)\n            outs.append(mask_preds)\n        if return_feat:\n            outs.append(res_feat)\n        return outs if len(outs) > 1 else outs[0]\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/mask_point_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py  # noqa\n\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import point_sample, rel_roi_point_to_rel_img_point\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.utils import (get_uncertain_point_coords_with_randomness,\n                                get_uncertainty)\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType\n\n\n@MODELS.register_module()\nclass MaskPointHead(BaseModule):\n    \"\"\"A mask point head use in PointRend.\n\n    ``MaskPointHead`` use shared multi-layer perceptron (equivalent to\n    nn.Conv1d) to predict the logit of input points. The fine-grained feature\n    and coarse feature will be concatenate together for predication.\n\n    Args:\n        num_fcs (int): Number of fc layers in the head. Defaults to 3.\n        in_channels (int): Number of input channels. Defaults to 256.\n        fc_channels (int): Number of fc channels. Defaults to 256.\n        num_classes (int): Number of classes for logits. Defaults to 80.\n        class_agnostic (bool): Whether use class agnostic classification.\n            If so, the output channels of logits will be 1. Defaults to False.\n        coarse_pred_each_layer (bool): Whether concatenate coarse feature with\n            the output of each fc layer. Defaults to True.\n        conv_cfg (:obj:`ConfigDict` or dict): Dictionary to construct\n            and config conv layer. Defaults to dict(type='Conv1d')).\n        norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct\n            and config norm layer. Defaults to None.\n        loss_point (:obj:`ConfigDict` or dict): Dictionary to construct and\n            config loss layer of point head. 
Defaults to\n            dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0).\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes: int,\n        num_fcs: int = 3,\n        in_channels: int = 256,\n        fc_channels: int = 256,\n        class_agnostic: bool = False,\n        coarse_pred_each_layer: bool = True,\n        conv_cfg: ConfigType = dict(type='Conv1d'),\n        norm_cfg: OptConfigType = None,\n        act_cfg: ConfigType = dict(type='ReLU'),\n        loss_point: ConfigType = dict(\n            type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n        init_cfg: MultiConfig = dict(\n            type='Normal', std=0.001, override=dict(name='fc_logits'))\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_fcs = num_fcs\n        self.in_channels = in_channels\n        self.fc_channels = fc_channels\n        self.num_classes = num_classes\n        self.class_agnostic = class_agnostic\n        self.coarse_pred_each_layer = coarse_pred_each_layer\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.loss_point = MODELS.build(loss_point)\n\n        fc_in_channels = in_channels + num_classes\n        self.fcs = nn.ModuleList()\n        for _ in range(num_fcs):\n            fc = ConvModule(\n                fc_in_channels,\n                fc_channels,\n                kernel_size=1,\n                stride=1,\n                padding=0,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.fcs.append(fc)\n            fc_in_channels = fc_channels\n            fc_in_channels += num_classes if self.coarse_pred_each_layer else 0\n\n        out_channels = 1 if self.class_agnostic else self.num_classes\n        self.fc_logits = nn.Conv1d(\n            fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n    def forward(self, fine_grained_feats: Tensor,\n                coarse_feats: Tensor) -> Tensor:\n        \"\"\"Classify each point base on fine grained and coarse feats.\n\n        Args:\n            fine_grained_feats (Tensor): Fine grained feature sampled from FPN,\n                shape (num_rois, in_channels, num_points).\n            coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,\n                shape (num_rois, num_classes, num_points).\n\n        Returns:\n            Tensor: Point classification results,\n            shape (num_rois, num_class, num_points).\n        \"\"\"\n\n        x = torch.cat([fine_grained_feats, coarse_feats], dim=1)\n        for fc in self.fcs:\n            x = fc(x)\n            if self.coarse_pred_each_layer:\n                x = torch.cat((x, coarse_feats), dim=1)\n        return self.fc_logits(x)\n\n    def get_targets(self, rois: Tensor, rel_roi_points: Tensor,\n                    sampling_results: List[SamplingResult],\n                    batch_gt_instances: InstanceList,\n                    cfg: ConfigType) -> Tensor:\n        \"\"\"Get training targets of MaskPointHead for all images.\n\n        Args:\n            rois (Tensor): Region of Interest, shape (num_rois, 5).\n            rel_roi_points (Tensor): Points coordinates relative to RoI, shape\n                (num_rois, num_points, 2).\n            sampling_results (:obj:`SamplingResult`): Sampling result after\n                sampling and assignment.\n            
batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            cfg (obj:`ConfigDict` or dict): Training cfg.\n\n        Returns:\n            Tensor: Point target, shape (num_rois, num_points).\n        \"\"\"\n\n        num_imgs = len(sampling_results)\n        rois_list = []\n        rel_roi_points_list = []\n        for batch_ind in range(num_imgs):\n            inds = (rois[:, 0] == batch_ind)\n            rois_list.append(rois[inds])\n            rel_roi_points_list.append(rel_roi_points[inds])\n        pos_assigned_gt_inds_list = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        cfg_list = [cfg for _ in range(num_imgs)]\n\n        point_targets = map(self._get_targets_single, rois_list,\n                            rel_roi_points_list, pos_assigned_gt_inds_list,\n                            batch_gt_instances, cfg_list)\n        point_targets = list(point_targets)\n\n        if len(point_targets) > 0:\n            point_targets = torch.cat(point_targets)\n\n        return point_targets\n\n    def _get_targets_single(self, rois: Tensor, rel_roi_points: Tensor,\n                            pos_assigned_gt_inds: Tensor,\n                            gt_instances: InstanceData,\n                            cfg: ConfigType) -> Tensor:\n        \"\"\"Get training target of MaskPointHead for each image.\"\"\"\n        num_pos = rois.size(0)\n        num_points = cfg.num_points\n        if num_pos > 0:\n            gt_masks_th = (\n                gt_instances.masks.to_tensor(rois.dtype,\n                                             rois.device).index_select(\n                                                 0, pos_assigned_gt_inds))\n            gt_masks_th = gt_masks_th.unsqueeze(1)\n            rel_img_points = rel_roi_point_to_rel_img_point(\n                rois, rel_roi_points, gt_masks_th)\n            point_targets = point_sample(gt_masks_th,\n                                         rel_img_points).squeeze(1)\n        else:\n            point_targets = rois.new_zeros((0, num_points))\n        return point_targets\n\n    def loss_and_target(self, point_pred: Tensor, rel_roi_points: Tensor,\n                        sampling_results: List[SamplingResult],\n                        batch_gt_instances: InstanceList,\n                        cfg: ConfigType) -> dict:\n        \"\"\"Calculate loss for MaskPointHead.\n\n        Args:\n            point_pred (Tensor): Point predication result, shape\n                (num_rois, num_classes, num_points).\n            rel_roi_points (Tensor): Points coordinates relative to RoI, shape\n                (num_rois, num_points, 2).\n             sampling_results (:obj:`SamplingResult`): Sampling result after\n                sampling and assignment.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            cfg (obj:`ConfigDict` or dict): Training cfg.\n\n        Returns:\n            dict: a dictionary of point loss and point target.\n        \"\"\"\n        rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n        point_target = self.get_targets(rois, rel_roi_points, sampling_results,\n                                        batch_gt_instances, cfg)\n        if self.class_agnostic:\n            loss_point = self.loss_point(point_pred, point_target,\n                                         torch.zeros_like(pos_labels))\n        else:\n            loss_point = self.loss_point(point_pred, point_target, pos_labels)\n\n        return dict(loss_point=loss_point, point_target=point_target)\n\n    def get_roi_rel_points_train(self, mask_preds: Tensor, labels: Tensor,\n                                 cfg: ConfigType) -> Tensor:\n        \"\"\"Get ``num_points`` most uncertain points with random points during\n        train.\n\n        Sample points in [0, 1] x [0, 1] coordinate space based on their\n        uncertainty. The uncertainties are calculated for each point using\n        '_get_uncertainty()' function that takes point's logit prediction as\n        input.\n\n        Args:\n            mask_preds (Tensor): A tensor of shape (num_rois, num_classes,\n                mask_height, mask_width) for class-specific or class-agnostic\n                prediction.\n            labels (Tensor): The ground truth class for each instance.\n            cfg (:obj:`ConfigDict` or dict): Training config of point head.\n\n        Returns:\n            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n            that contains the coordinates sampled points.\n        \"\"\"\n        point_coords = get_uncertain_point_coords_with_randomness(\n            mask_preds, labels, cfg.num_points, cfg.oversample_ratio,\n            cfg.importance_sample_ratio)\n        return point_coords\n\n    def get_roi_rel_points_test(self, mask_preds: Tensor, label_preds: Tensor,\n                                cfg: ConfigType) -> Tuple[Tensor, Tensor]:\n        \"\"\"Get ``num_points`` most uncertain points during test.\n\n        Args:\n            mask_preds (Tensor): A tensor of shape (num_rois, num_classes,\n                mask_height, mask_width) for class-specific or class-agnostic\n                prediction.\n            label_preds (Tensor): The predication class for each instance.\n            cfg (:obj:`ConfigDict` or dict): Testing config of point head.\n\n        Returns:\n            tuple:\n\n            - point_indices (Tensor): A tensor of shape (num_rois, num_points)\n              that contains indices from [0, mask_height x mask_width) of the\n              most uncertain points.\n            - point_coords (Tensor): A tensor of shape (num_rois, num_points,\n              2) that contains [0, 1] x [0, 1] normalized coordinates of the\n              most uncertain points from the [mask_height, mask_width] grid.\n        \"\"\"\n        num_points = cfg.subdivision_num_points\n        uncertainty_map = get_uncertainty(mask_preds, label_preds)\n        num_rois, _, mask_height, mask_width = uncertainty_map.shape\n\n        # During ONNX exporting, the type of each elements of 'shape' is\n        # `Tensor(float)`, while it is `float` during PyTorch inference.\n        if isinstance(mask_height, 
torch.Tensor):\n            h_step = 1.0 / mask_height.float()\n            w_step = 1.0 / mask_width.float()\n        else:\n            h_step = 1.0 / mask_height\n            w_step = 1.0 / mask_width\n        # cast to int to avoid dynamic K for TopK op in ONNX\n        mask_size = int(mask_height * mask_width)\n        uncertainty_map = uncertainty_map.view(num_rois, mask_size)\n        num_points = min(mask_size, num_points)\n        point_indices = uncertainty_map.topk(num_points, dim=1)[1]\n        xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step\n        ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step\n        point_coords = torch.stack([xs, ys], dim=2)\n        return point_indices, point_coords\n"
  },
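A hypothetical call to the `MaskPointHead.forward` defined above, showing how fine-grained FPN point features and coarse mask logits are combined into point-wise class logits; the number of RoIs and points is arbitrary and the snippet is a sketch only.

```python
import torch

from mmdet.models.roi_heads.mask_heads.mask_point_head import MaskPointHead

head = MaskPointHead(num_classes=80, in_channels=256)
fine = torch.rand(4, 256, 196)    # (num_rois, in_channels, num_points)
coarse = torch.rand(4, 80, 196)   # (num_rois, num_classes, num_points)
point_logits = head(fine, coarse)
assert point_logits.shape == (4, 80, 196)
```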
  {
    "path": "mmdet/models/roi_heads/mask_heads/maskiou_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Conv2d, Linear, MaxPool2d\nfrom mmengine.config import ConfigDict\nfrom mmengine.model import BaseModule\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, InstanceList, OptMultiConfig\n\n\n@MODELS.register_module()\nclass MaskIoUHead(BaseModule):\n    \"\"\"Mask IoU Head.\n\n    This head predicts the IoU of predicted masks and corresponding gt masks.\n\n    Args:\n        num_convs (int): The number of convolution layers. Defaults to 4.\n        num_fcs (int): The number of fully connected layers. Defaults to 2.\n        roi_feat_size (int): RoI feature size. Default to 14.\n        in_channels (int): The channel number of inputs features.\n            Defaults to 256.\n        conv_out_channels (int): The feature channels of convolution layers.\n            Defaults to 256.\n        fc_out_channels (int): The feature channels of fully connected layers.\n            Defaults to 1024.\n        num_classes (int): Number of categories excluding the background\n            category. Defaults to 80.\n        loss_iou (:obj:`ConfigDict` or dict): IoU loss.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_convs: int = 4,\n        num_fcs: int = 2,\n        roi_feat_size: int = 14,\n        in_channels: int = 256,\n        conv_out_channels: int = 256,\n        fc_out_channels: int = 1024,\n        num_classes: int = 80,\n        loss_iou: ConfigType = dict(type='MSELoss', loss_weight=0.5),\n        init_cfg: OptMultiConfig = [\n            dict(type='Kaiming', override=dict(name='convs')),\n            dict(type='Caffe2Xavier', override=dict(name='fcs')),\n            dict(type='Normal', std=0.01, override=dict(name='fc_mask_iou'))\n        ]\n    ) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.num_classes = num_classes\n\n        self.convs = nn.ModuleList()\n        for i in range(num_convs):\n            if i == 0:\n                # concatenation of mask feature and mask prediction\n                in_channels = self.in_channels + 1\n            else:\n                in_channels = self.conv_out_channels\n            stride = 2 if i == num_convs - 1 else 1\n            self.convs.append(\n                Conv2d(\n                    in_channels,\n                    self.conv_out_channels,\n                    3,\n                    stride=stride,\n                    padding=1))\n\n        roi_feat_size = _pair(roi_feat_size)\n        pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)\n        self.fcs = nn.ModuleList()\n        for i in range(num_fcs):\n            in_channels = (\n                self.conv_out_channels *\n                pooled_area if i == 0 else self.fc_out_channels)\n            self.fcs.append(Linear(in_channels, self.fc_out_channels))\n\n        self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)\n        self.relu = nn.ReLU()\n        self.max_pool = MaxPool2d(2, 
2)\n        self.loss_iou = MODELS.build(loss_iou)\n\n    def forward(self, mask_feat: Tensor, mask_preds: Tensor) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            mask_feat (Tensor): Mask features from upstream models.\n            mask_preds (Tensor): Mask predictions from mask head.\n\n        Returns:\n            Tensor: Mask IoU predictions.\n        \"\"\"\n        mask_preds = mask_preds.sigmoid()\n        mask_pred_pooled = self.max_pool(mask_preds.unsqueeze(1))\n\n        x = torch.cat((mask_feat, mask_pred_pooled), 1)\n\n        for conv in self.convs:\n            x = self.relu(conv(x))\n        x = x.flatten(1)\n        for fc in self.fcs:\n            x = self.relu(fc(x))\n        mask_iou = self.fc_mask_iou(x)\n        return mask_iou\n\n    def loss_and_target(self, mask_iou_pred: Tensor, mask_preds: Tensor,\n                        mask_targets: Tensor,\n                        sampling_results: List[SamplingResult],\n                        batch_gt_instances: InstanceList,\n                        rcnn_train_cfg: ConfigDict) -> dict:\n        \"\"\"Calculate the loss and targets of MaskIoUHead.\n\n        Args:\n            mask_iou_pred (Tensor): Mask IoU predictions results, has shape\n                (num_pos, num_classes)\n            mask_preds (Tensor): Mask predictions from mask head, has shape\n                (num_pos, mask_size, mask_size).\n            mask_targets (Tensor): The ground truth masks assigned with\n                predictions, has shape\n                (num_pos, mask_size, mask_size).\n            sampling_results (List[obj:SamplingResult]): Assign results of\n                all images in a batch after sampling.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  
It includes ``masks`` inside.\n            rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.\n\n        Returns:\n            dict: A dictionary of loss and targets components.\n                The targets are only used for cascade rcnn.\n        \"\"\"\n        mask_iou_targets = self.get_targets(\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            mask_preds=mask_preds,\n            mask_targets=mask_targets,\n            rcnn_train_cfg=rcnn_train_cfg)\n\n        pos_inds = mask_iou_targets > 0\n        if pos_inds.sum() > 0:\n            loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],\n                                          mask_iou_targets[pos_inds])\n        else:\n            loss_mask_iou = mask_iou_pred.sum() * 0\n        return dict(loss_mask_iou=loss_mask_iou)\n\n    def get_targets(self, sampling_results: List[SamplingResult],\n                    batch_gt_instances: InstanceList, mask_preds: Tensor,\n                    mask_targets: Tensor,\n                    rcnn_train_cfg: ConfigDict) -> Tensor:\n        \"\"\"Compute target of mask IoU.\n\n        Mask IoU target is the IoU of the predicted mask (inside a bbox) and\n        the gt mask of corresponding gt mask (the whole instance).\n        The intersection area is computed inside the bbox, and the gt mask area\n        is computed with two steps, firstly we compute the gt area inside the\n        bbox, then divide it by the area ratio of gt area inside the bbox and\n        the gt area of the whole instance.\n\n        Args:\n            sampling_results (list[:obj:`SamplingResult`]): sampling results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance.  It includes ``masks`` inside.\n            mask_preds (Tensor): Predicted masks of each positive proposal,\n                shape (num_pos, h, w).\n            mask_targets (Tensor): Gt mask of each positive proposal,\n                binary map of the shape (num_pos, h, w).\n            rcnn_train_cfg (obj:`ConfigDict`): Training config for R-CNN part.\n\n        Returns:\n            Tensor: mask iou target (length == num positive).\n        \"\"\"\n        pos_proposals = [res.pos_priors for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        gt_masks = [res.masks for res in batch_gt_instances]\n\n        # compute the area ratio of gt areas inside the proposals and\n        # the whole instance\n        area_ratios = map(self._get_area_ratio, pos_proposals,\n                          pos_assigned_gt_inds, gt_masks)\n        area_ratios = torch.cat(list(area_ratios))\n        assert mask_targets.size(0) == area_ratios.size(0)\n\n        mask_preds = (mask_preds > rcnn_train_cfg.mask_thr_binary).float()\n        mask_pred_areas = mask_preds.sum((-1, -2))\n\n        # mask_preds and mask_targets are binary maps\n        overlap_areas = (mask_preds * mask_targets).sum((-1, -2))\n\n        # compute the mask area of the whole instance\n        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)\n\n        mask_iou_targets = overlap_areas / (\n            mask_pred_areas + gt_full_areas - overlap_areas)\n        return mask_iou_targets\n\n    def _get_area_ratio(self, pos_proposals: Tensor,\n                        pos_assigned_gt_inds: Tensor,\n                        gt_masks: InstanceData) -> Tensor:\n        \"\"\"Compute area ratio of the 
gt mask inside the proposal and the gt\n        mask of the corresponding instance.\n\n        Args:\n            pos_proposals (Tensor): Positive proposals, has shape (num_pos, 4).\n            pos_assigned_gt_inds (Tensor): positive proposals assigned ground\n                truth index.\n            gt_masks (BitmapMask or PolygonMask): Gt masks (the whole instance)\n                of each image, with the same shape of the input image.\n\n        Returns:\n            Tensor: The area ratio of the gt mask inside the proposal and the\n            gt mask of the corresponding instance.\n        \"\"\"\n        num_pos = pos_proposals.size(0)\n        if num_pos > 0:\n            area_ratios = []\n            proposals_np = pos_proposals.cpu().numpy()\n            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()\n            # compute mask areas of gt instances (batch processing for speedup)\n            gt_instance_mask_area = gt_masks.areas\n            for i in range(num_pos):\n                gt_mask = gt_masks[pos_assigned_gt_inds[i]]\n\n                # crop the gt mask inside the proposal\n                bbox = proposals_np[i, :].astype(np.int32)\n                gt_mask_in_proposal = gt_mask.crop(bbox)\n\n                ratio = gt_mask_in_proposal.areas[0] / (\n                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)\n                area_ratios.append(ratio)\n            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(\n                pos_proposals.device)\n        else:\n            area_ratios = pos_proposals.new_zeros((0, ))\n        return area_ratios\n\n    def predict_by_feat(self, mask_iou_preds: Tuple[Tensor],\n                        results_list: InstanceList) -> InstanceList:\n        \"\"\"Predict the mask iou and calculate it into ``results.scores``.\n\n        Args:\n            mask_iou_preds (Tensor): Mask IoU predictions results, has shape\n                (num_proposals, num_classes)\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process. Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        assert len(mask_iou_preds) == len(results_list)\n        for results, mask_iou_pred in zip(results_list, mask_iou_preds):\n            labels = results.labels\n            scores = results.scores\n            results.scores = scores * mask_iou_pred[range(labels.size(0)),\n                                                    labels]\n        return results_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/scnet_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.layers import ResLayer, SimplifiedBasicBlock\nfrom mmdet.registry import MODELS\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@MODELS.register_module()\nclass SCNetMaskHead(FCNMaskHead):\n    \"\"\"Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        conv_to_res (bool, optional): if True, change the conv layers to\n            ``SimplifiedBasicBlock``.\n    \"\"\"\n\n    def __init__(self, conv_to_res: bool = True, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.conv_to_res = conv_to_res\n        if conv_to_res:\n            assert self.conv_kernel_size == 3\n            self.num_res_blocks = self.num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                self.in_channels,\n                self.conv_out_channels,\n                self.num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.layers import ResLayer, SimplifiedBasicBlock\nfrom mmdet.registry import MODELS\nfrom .fused_semantic_head import FusedSemanticHead\n\n\n@MODELS.register_module()\nclass SCNetSemanticHead(FusedSemanticHead):\n    \"\"\"Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        conv_to_res (bool, optional): if True, change the conv layers to\n            ``SimplifiedBasicBlock``.\n    \"\"\"\n\n    def __init__(self, conv_to_res: bool = True, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.conv_to_res = conv_to_res\n        if self.conv_to_res:\n            num_res_blocks = self.num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                self.in_channels,\n                self.conv_out_channels,\n                num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n            self.num_convs = num_res_blocks\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_scoring_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils.misc import empty_instances\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass MaskScoringRoIHead(StandardRoIHead):\n    \"\"\"Mask Scoring RoIHead for `Mask Scoring RCNN.\n\n    <https://arxiv.org/abs/1903.00241>`_.\n\n    Args:\n        mask_iou_head (:obj`ConfigDict`, dict): The config of mask_iou_head.\n    \"\"\"\n\n    def __init__(self, mask_iou_head: ConfigType, **kwargs):\n        assert mask_iou_head is not None\n        super().__init__(**kwargs)\n        self.mask_iou_head = MODELS.build(mask_iou_head)\n\n    def forward(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList = None) -> tuple:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n            the meta information of each image and corresponding\n            annotations.\n\n        Returns\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            bbox_results = self._bbox_forward(x, rois)\n            results = results + (bbox_results['cls_score'],\n                                 bbox_results['bbox_pred'])\n        # mask head\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_results = self._mask_forward(x, mask_rois)\n            results = results + (mask_results['mask_preds'], )\n\n            # mask iou head\n            cls_score = bbox_results['cls_score'][:100]\n            mask_preds = mask_results['mask_preds']\n            mask_feats = mask_results['mask_feats']\n            _, labels = cls_score[:, :self.bbox_head.num_classes].max(dim=1)\n            mask_iou_preds = self.mask_iou_head(\n                mask_feats, mask_preds[range(labels.size(0)), labels])\n            results = results + (mask_iou_preds, )\n\n        return results\n\n    def mask_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult], bbox_feats,\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the mask head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            bbox_feats (Tensor): Extract bbox RoI features.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `mask_feats` (Tensor): Extract mask RoI features.\n                - `mask_targets` (Tensor): Mask target of each positive\\\n                    proposals in the image.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n                - `loss_mask_iou` (Tensor): mask iou loss.\n        \"\"\"\n        if not self.share_roi_extractor:\n            pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n            mask_results = self._mask_forward(x, pos_rois)\n        else:\n            pos_inds = []\n            device = bbox_feats.device\n            for res in sampling_results:\n                pos_inds.append(\n                    torch.ones(\n                        res.pos_priors.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n                pos_inds.append(\n                    torch.zeros(\n                        res.neg_priors.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n            pos_inds = torch.cat(pos_inds)\n\n            mask_results = self._mask_forward(\n                x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n        mask_loss_and_target = self.mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg)\n        mask_targets = mask_loss_and_target['mask_targets']\n        mask_results.update(loss_mask=mask_loss_and_target['loss_mask'])\n        if mask_results['loss_mask'] is None:\n            return mask_results\n\n        # mask iou head forward and loss\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        pos_mask_pred = mask_results['mask_preds'][\n            range(mask_results['mask_preds'].size(0)), pos_labels]\n        mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],\n                                           pos_mask_pred)\n        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),\n                                          pos_labels]\n\n        loss_mask_iou = self.mask_iou_head.loss_and_target(\n            pos_mask_iou_pred, pos_mask_pred, mask_targets, sampling_results,\n            batch_gt_instances, self.train_cfg)\n        mask_results['loss_mask'].update(loss_mask_iou)\n        return mask_results\n\n    def predict_mask(self,\n                     x: Tensor,\n                     batch_img_metas: List[dict],\n                     results_list: InstanceList,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post 
process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        mask_results = self._mask_forward(x, mask_rois)\n        mask_preds = mask_results['mask_preds']\n        mask_feats = mask_results['mask_feats']\n        # get mask scores with mask iou head\n        labels = torch.cat([res.labels for res in results_list])\n        mask_iou_preds = self.mask_iou_head(\n            mask_feats, mask_preds[range(labels.size(0)), labels])\n        # split batch mask prediction back to each image\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n        mask_iou_preds = mask_iou_preds.split(num_mask_rois_per_img, 0)\n\n        # TODO: Handle the case where rescale is false\n        results_list = self.mask_head.predict_by_feat(\n            mask_preds=mask_preds,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale)\n        results_list = self.mask_iou_head.predict_by_feat(\n            mask_iou_preds=mask_iou_preds, results_list=results_list)\n        return results_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/multi_instance_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils import empty_instances, unpack_gt_instances\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass MultiInstanceRoIHead(StandardRoIHead):\n    \"\"\"The roi head for Multi-instance prediction.\"\"\"\n\n    def __init__(self, num_instance: int = 2, *args, **kwargs) -> None:\n        self.num_instance = num_instance\n        super().__init__(*args, **kwargs)\n\n    def init_bbox_head(self, bbox_roi_extractor: ConfigType,\n                       bbox_head: ConfigType) -> None:\n        \"\"\"Initialize box head and box roi extractor.\n\n        Args:\n            bbox_roi_extractor (dict or ConfigDict): Config of box\n                roi extractor.\n            bbox_head (dict or ConfigDict): Config of box in box head.\n        \"\"\"\n        self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)\n        self.bbox_head = MODELS.build(bbox_head)\n\n    def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `cls_score_ref` (Tensor): The cls_score after refine model.\n                - `bbox_pred_ref` (Tensor): The bbox_pred after refine model.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        # TODO: a more flexible way to decide which feature maps to use\n        bbox_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        bbox_results = self.bbox_head(bbox_feats)\n\n        if self.bbox_head.with_refine:\n            bbox_results = dict(\n                cls_score=bbox_results[0],\n                bbox_pred=bbox_results[1],\n                cls_score_ref=bbox_results[2],\n                bbox_pred_ref=bbox_results[3],\n                bbox_feats=bbox_feats)\n        else:\n            bbox_results = dict(\n                cls_score=bbox_results[0],\n                bbox_pred=bbox_results[1],\n                bbox_feats=bbox_feats)\n\n        return bbox_results\n\n    def bbox_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n                - `loss_bbox` (dict): 
A dictionary of bbox loss components.\n        \"\"\"\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n\n        # If there is a refining process, add refine loss.\n        if 'cls_score_ref' in bbox_results:\n            bbox_loss_and_target = self.bbox_head.loss_and_target(\n                cls_score=bbox_results['cls_score'],\n                bbox_pred=bbox_results['bbox_pred'],\n                rois=rois,\n                sampling_results=sampling_results,\n                rcnn_train_cfg=self.train_cfg)\n            bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])\n            bbox_loss_and_target_ref = self.bbox_head.loss_and_target(\n                cls_score=bbox_results['cls_score_ref'],\n                bbox_pred=bbox_results['bbox_pred_ref'],\n                rois=rois,\n                sampling_results=sampling_results,\n                rcnn_train_cfg=self.train_cfg)\n            bbox_results['loss_bbox']['loss_rcnn_emd_ref'] = \\\n                bbox_loss_and_target_ref['loss_bbox']['loss_rcnn_emd']\n        else:\n            bbox_loss_and_target = self.bbox_head.loss_and_target(\n                cls_score=bbox_results['cls_score'],\n                bbox_pred=bbox_results['bbox_pred'],\n                rois=rois,\n                sampling_results=sampling_results,\n                rcnn_train_cfg=self.train_cfg)\n            bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])\n\n        return bbox_results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: List[DetDataSample]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, _ = outputs\n\n        sampling_results = []\n        for i in range(len(batch_data_samples)):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n\n            assign_result = self.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                batch_gt_instances_ignore=batch_gt_instances_ignore[i])\n            sampling_results.append(sampling_result)\n\n        losses = dict()\n        # bbox head loss\n        if self.with_bbox:\n            bbox_results = self.bbox_loss(x, sampling_results)\n            losses.update(bbox_results['loss_bbox'])\n\n        return losses\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        proposals = [res.bboxes for res in rpn_results_list]\n        rois = bbox2roi(proposals)\n\n        if rois.shape[0] == 0:\n            return empty_instances(\n                batch_img_metas, rois.device, task_type='bbox')\n\n        bbox_results = self._bbox_forward(x, rois)\n\n        # split batch bbox prediction back to each image\n        if 'cls_score_ref' in bbox_results:\n            cls_scores = bbox_results['cls_score_ref']\n            bbox_preds = bbox_results['bbox_pred_ref']\n        else:\n            cls_scores = bbox_results['cls_score']\n            bbox_preds = bbox_results['bbox_pred']\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = rois.split(num_proposals_per_img, 0)\n        cls_scores = cls_scores.split(num_proposals_per_img, 0)\n\n        if 
bbox_preds is not None:\n            bbox_preds = bbox_preds.split(num_proposals_per_img, 0)\n        else:\n            bbox_preds = (None, ) * len(proposals)\n\n        result_list = self.bbox_head.predict_by_feat(\n            rois=rois,\n            cls_scores=cls_scores,\n            bbox_preds=bbox_preds,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=rcnn_test_cfg,\n            rescale=rescale)\n        return result_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/pisa_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules import SamplingResult\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import InstanceList\nfrom ..losses.pisa_loss import carl_loss, isr_p\nfrom ..utils import unpack_gt_instances\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass PISARoIHead(StandardRoIHead):\n    r\"\"\"The RoI head for `Prime Sample Attention in Object Detection\n    <https://arxiv.org/abs/1904.04821>`_.\"\"\"\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: List[DetDataSample]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, _ = outputs\n\n        # assign gts and sample proposals\n        num_imgs = len(batch_data_samples)\n        sampling_results = []\n        neg_label_weights = []\n        for i in range(num_imgs):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n\n            assign_result = self.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                feats=[lvl_feat[i][None] for lvl_feat in x])\n            if isinstance(sampling_result, tuple):\n                sampling_result, neg_label_weight = sampling_result\n            sampling_results.append(sampling_result)\n            neg_label_weights.append(neg_label_weight)\n\n        losses = dict()\n        # bbox head forward and loss\n        if self.with_bbox:\n            bbox_results = self.bbox_loss(\n                x, sampling_results, neg_label_weights=neg_label_weights)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self.mask_loss(x, sampling_results,\n                                          bbox_results['bbox_feats'],\n                                          batch_gt_instances)\n            losses.update(mask_results['loss_mask'])\n\n        return losses\n\n    def bbox_loss(self,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  neg_label_weights: List[Tensor] = None) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n        
    x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n            - `cls_score` (Tensor): Classification scores.\n            - `bbox_pred` (Tensor): Box energies / deltas.\n            - `bbox_feats` (Tensor): Extract bbox RoI features.\n            - `loss_bbox` (dict): A dictionary of bbox loss components.\n        \"\"\"\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n        bbox_targets = self.bbox_head.get_targets(sampling_results,\n                                                  self.train_cfg)\n\n        # neg_label_weights obtained by sampler is image-wise, mapping back to\n        # the corresponding location in label weights\n        if neg_label_weights[0] is not None:\n            label_weights = bbox_targets[1]\n            cur_num_rois = 0\n            for i in range(len(sampling_results)):\n                num_pos = sampling_results[i].pos_inds.size(0)\n                num_neg = sampling_results[i].neg_inds.size(0)\n                label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +\n                              num_neg] = neg_label_weights[i]\n                cur_num_rois += num_pos + num_neg\n\n        cls_score = bbox_results['cls_score']\n        bbox_pred = bbox_results['bbox_pred']\n\n        # Apply ISR-P\n        isr_cfg = self.train_cfg.get('isr', None)\n        if isr_cfg is not None:\n            bbox_targets = isr_p(\n                cls_score,\n                bbox_pred,\n                bbox_targets,\n                rois,\n                sampling_results,\n                self.bbox_head.loss_cls,\n                self.bbox_head.bbox_coder,\n                **isr_cfg,\n                num_class=self.bbox_head.num_classes)\n        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,\n                                        *bbox_targets)\n\n        # Add CARL Loss\n        carl_cfg = self.train_cfg.get('carl', None)\n        if carl_cfg is not None:\n            loss_carl = carl_loss(\n                cls_score,\n                bbox_targets[0],\n                bbox_pred,\n                bbox_targets[2],\n                self.bbox_head.loss_bbox,\n                **carl_cfg,\n                num_class=self.bbox_head.num_classes)\n            loss_bbox.update(loss_carl)\n\n        bbox_results.update(loss_bbox=loss_bbox)\n        return bbox_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/point_rend_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend  # noqa\nfrom typing import List, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.ops import point_sample, rel_roi_point_to_rel_img_point\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils import empty_instances\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass PointRendRoIHead(StandardRoIHead):\n    \"\"\"`PointRend <https://arxiv.org/abs/1912.08193>`_.\"\"\"\n\n    def __init__(self, point_head: ConfigType, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        assert self.with_bbox and self.with_mask\n        self.init_point_head(point_head)\n\n    def init_point_head(self, point_head: ConfigType) -> None:\n        \"\"\"Initialize ``point_head``\"\"\"\n        self.point_head = MODELS.build(point_head)\n\n    def mask_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult], bbox_feats: Tensor,\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head and point head\n        in training.\"\"\"\n        mask_results = super().mask_loss(\n            x=x,\n            sampling_results=sampling_results,\n            bbox_feats=bbox_feats,\n            batch_gt_instances=batch_gt_instances)\n\n        mask_point_results = self._mask_point_loss(\n            x=x,\n            sampling_results=sampling_results,\n            mask_preds=mask_results['mask_preds'],\n            batch_gt_instances=batch_gt_instances)\n        mask_results['loss_mask'].update(\n            loss_point=mask_point_results['loss_point'])\n\n        return mask_results\n\n    def _mask_point_loss(self, x: Tuple[Tensor],\n                         sampling_results: List[SamplingResult],\n                         mask_preds: Tensor,\n                         batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Run forward function and calculate loss for point head in\n        training.\"\"\"\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        rel_roi_points = self.point_head.get_roi_rel_points_train(\n            mask_preds, pos_labels, cfg=self.train_cfg)\n        rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n\n        fine_grained_point_feats = self._get_fine_grained_point_feats(\n            x, rois, rel_roi_points)\n        coarse_point_feats = point_sample(mask_preds, rel_roi_points)\n        mask_point_pred = self.point_head(fine_grained_point_feats,\n                                          coarse_point_feats)\n\n        loss_and_target = self.point_head.loss_and_target(\n            point_pred=mask_point_pred,\n            rel_roi_points=rel_roi_points,\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            cfg=self.train_cfg)\n\n        return loss_and_target\n\n    def _mask_point_forward_test(self, x: Tuple[Tensor], rois: Tensor,\n                                 label_preds: Tensor,\n                                 mask_preds: Tensor) -> Tensor:\n        \"\"\"Mask refining process with point head in testing.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale 
level.\n            rois (Tensor): shape (num_rois, 5).\n            label_preds (Tensor): The predication class for each rois.\n            mask_preds (Tensor): The predication coarse masks of\n                shape (num_rois, num_classes, small_size, small_size).\n\n        Returns:\n            Tensor: The refined masks of shape (num_rois, num_classes,\n            large_size, large_size).\n        \"\"\"\n        refined_mask_pred = mask_preds.clone()\n        for subdivision_step in range(self.test_cfg.subdivision_steps):\n            refined_mask_pred = F.interpolate(\n                refined_mask_pred,\n                scale_factor=self.test_cfg.scale_factor,\n                mode='bilinear',\n                align_corners=False)\n            # If `subdivision_num_points` is larger or equal to the\n            # resolution of the next step, then we can skip this step\n            num_rois, channels, mask_height, mask_width = \\\n                refined_mask_pred.shape\n            if (self.test_cfg.subdivision_num_points >=\n                    self.test_cfg.scale_factor**2 * mask_height * mask_width\n                    and\n                    subdivision_step < self.test_cfg.subdivision_steps - 1):\n                continue\n            point_indices, rel_roi_points = \\\n                self.point_head.get_roi_rel_points_test(\n                    refined_mask_pred, label_preds, cfg=self.test_cfg)\n\n            fine_grained_point_feats = self._get_fine_grained_point_feats(\n                x=x, rois=rois, rel_roi_points=rel_roi_points)\n            coarse_point_feats = point_sample(mask_preds, rel_roi_points)\n            mask_point_pred = self.point_head(fine_grained_point_feats,\n                                              coarse_point_feats)\n\n            point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)\n            refined_mask_pred = refined_mask_pred.reshape(\n                num_rois, channels, mask_height * mask_width)\n            refined_mask_pred = refined_mask_pred.scatter_(\n                2, point_indices, mask_point_pred)\n            refined_mask_pred = refined_mask_pred.view(num_rois, channels,\n                                                       mask_height, mask_width)\n\n        return refined_mask_pred\n\n    def _get_fine_grained_point_feats(self, x: Tuple[Tensor], rois: Tensor,\n                                      rel_roi_points: Tensor) -> Tensor:\n        \"\"\"Sample fine grained feats from each level feature map and\n        concatenate them together.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            rois (Tensor): shape (num_rois, 5).\n            rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,\n                2) that contains [0, 1] x [0, 1] normalized coordinates of the\n                most uncertain points from the [mask_height, mask_width] grid.\n\n        Returns:\n            Tensor: The fine grained features for each points,\n            has shape (num_rois, feats_channels, num_points).\n        \"\"\"\n        assert rois.shape[0] > 0, 'RoI is a empty tensor.'\n        num_imgs = x[0].shape[0]\n        fine_grained_feats = []\n        for idx in range(self.mask_roi_extractor.num_inputs):\n            feats = x[idx]\n            spatial_scale = 1. 
/ float(\n                self.mask_roi_extractor.featmap_strides[idx])\n            point_feats = []\n            for batch_ind in range(num_imgs):\n                # unravel batch dim\n                feat = feats[batch_ind].unsqueeze(0)\n                inds = (rois[:, 0].long() == batch_ind)\n                if inds.any():\n                    rel_img_points = rel_roi_point_to_rel_img_point(\n                        rois=rois[inds],\n                        rel_roi_points=rel_roi_points[inds],\n                        img=feat.shape[2:],\n                        spatial_scale=spatial_scale).unsqueeze(0)\n                    point_feat = point_sample(feat, rel_img_points)\n                    point_feat = point_feat.squeeze(0).transpose(0, 1)\n                    point_feats.append(point_feat)\n            fine_grained_feats.append(torch.cat(point_feats, dim=0))\n        return torch.cat(fine_grained_feats, dim=1)\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     results_list: InstanceList,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n            - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        # don't need to consider aug_test.\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        mask_results = self._mask_forward(x, mask_rois)\n        mask_preds = mask_results['mask_preds']\n        # split batch mask prediction back to each image\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n\n        # refine mask_preds\n        mask_rois = mask_rois.split(num_mask_rois_per_img, 0)\n        mask_preds_refined = []\n        for i in range(len(batch_img_metas)):\n            labels = results_list[i].labels\n            x_i = [xx[[i]] for xx in x]\n            mask_rois_i = mask_rois[i]\n            mask_rois_i[:, 0] = 0\n            mask_pred_i = self._mask_point_forward_test(\n                x_i, mask_rois_i, labels, mask_preds[i])\n            mask_preds_refined.append(mask_pred_i)\n\n        # TODO: 
Handle the case where rescale is false\n        results_list = self.mask_head.predict_by_feat(\n            mask_preds=mask_preds_refined,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale)\n        return results_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_roi_extractor import BaseRoIExtractor\nfrom .generic_roi_extractor import GenericRoIExtractor\nfrom .single_level_roi_extractor import SingleRoIExtractor\n\n__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv import ops\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.utils import ConfigType, OptMultiConfig\n\n\nclass BaseRoIExtractor(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for RoI extractor.\n\n    Args:\n        roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and\n            arguments.\n        out_channels (int): Output channels of RoI layers.\n        featmap_strides (list[int]): Strides of input feature maps.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 roi_layer: ConfigType,\n                 out_channels: int,\n                 featmap_strides: List[int],\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)\n        self.out_channels = out_channels\n        self.featmap_strides = featmap_strides\n\n    @property\n    def num_inputs(self) -> int:\n        \"\"\"int: Number of input feature maps.\"\"\"\n        return len(self.featmap_strides)\n\n    def build_roi_layers(self, layer_cfg: ConfigType,\n                         featmap_strides: List[int]) -> nn.ModuleList:\n        \"\"\"Build RoI operator to extract feature from each level feature map.\n\n        Args:\n            layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and\n                config RoI layer operation. Options are modules under\n                ``mmcv/ops`` such as ``RoIAlign``.\n            featmap_strides (list[int]): The stride of input feature map w.r.t\n                to the original image size, which would be used to scale RoI\n                coordinate (original image coordinate system) to feature\n                coordinate system.\n\n        Returns:\n            :obj:`nn.ModuleList`: The RoI extractor modules for each level\n                feature map.\n        \"\"\"\n\n        cfg = layer_cfg.copy()\n        layer_type = cfg.pop('type')\n        assert hasattr(ops, layer_type)\n        layer_cls = getattr(ops, layer_type)\n        roi_layers = nn.ModuleList(\n            [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])\n        return roi_layers\n\n    def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:\n        \"\"\"Scale RoI coordinates by scale factor.\n\n        Args:\n            rois (Tensor): RoI (Region of Interest), shape (n, 5)\n            scale_factor (float): Scale factor that RoI will be multiplied by.\n\n        Returns:\n            Tensor: Scaled RoI.\n        \"\"\"\n\n        cx = (rois[:, 1] + rois[:, 3]) * 0.5\n        cy = (rois[:, 2] + rois[:, 4]) * 0.5\n        w = rois[:, 3] - rois[:, 1]\n        h = rois[:, 4] - rois[:, 2]\n        new_w = w * scale_factor\n        new_h = h * scale_factor\n        x1 = cx - new_w * 0.5\n        x2 = cx + new_w * 0.5\n        y1 = cy - new_h * 0.5\n        y2 = cy + new_h * 0.5\n        new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)\n        return new_rois\n\n    @abstractmethod\n    def forward(self,\n                feats: Tuple[Tensor],\n                rois: Tensor,\n                roi_scale_factor: Optional[float] = None) -> Tensor:\n  
      \"\"\"Extractor ROI feats.\n\n        Args:\n            feats (Tuple[Tensor]): Multi-scale features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            roi_scale_factor (Optional[float]): RoI scale factor.\n                Defaults to None.\n\n        Returns:\n            Tensor: RoI feature.\n        \"\"\"\n        pass\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple\n\nfrom mmcv.cnn.bricks import build_plugin_layer\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType\nfrom .base_roi_extractor import BaseRoIExtractor\n\n\n@MODELS.register_module()\nclass GenericRoIExtractor(BaseRoIExtractor):\n    \"\"\"Extract RoI features from all level feature maps levels.\n\n    This is the implementation of `A novel Region of Interest Extraction Layer\n    for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.\n\n    Args:\n        aggregation (str): The method to aggregate multiple feature maps.\n            Options are 'sum', 'concat'. Defaults to 'sum'.\n        pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules.\n            Defaults to None.\n        post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules.\n            Defaults to None.\n        kwargs (keyword arguments): Arguments that are the same\n            as :class:`BaseRoIExtractor`.\n    \"\"\"\n\n    def __init__(self,\n                 aggregation: str = 'sum',\n                 pre_cfg: OptConfigType = None,\n                 post_cfg: OptConfigType = None,\n                 **kwargs) -> None:\n        super().__init__(**kwargs)\n\n        assert aggregation in ['sum', 'concat']\n\n        self.aggregation = aggregation\n        self.with_post = post_cfg is not None\n        self.with_pre = pre_cfg is not None\n        # build pre/post processing modules\n        if self.with_post:\n            self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]\n        if self.with_pre:\n            self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]\n\n    def forward(self,\n                feats: Tuple[Tensor],\n                rois: Tensor,\n                roi_scale_factor: Optional[float] = None) -> Tensor:\n        \"\"\"Extractor ROI feats.\n\n        Args:\n            feats (Tuple[Tensor]): Multi-scale features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            roi_scale_factor (Optional[float]): RoI scale factor.\n                Defaults to None.\n\n        Returns:\n            Tensor: RoI feature.\n        \"\"\"\n        out_size = self.roi_layers[0].output_size\n        num_levels = len(feats)\n        roi_feats = feats[0].new_zeros(\n            rois.size(0), self.out_channels, *out_size)\n\n        # some times rois is an empty tensor\n        if roi_feats.shape[0] == 0:\n            return roi_feats\n\n        if num_levels == 1:\n            return self.roi_layers[0](feats[0], rois)\n\n        if roi_scale_factor is not None:\n            rois = self.roi_rescale(rois, roi_scale_factor)\n\n        # mark the starting channels for concat mode\n        start_channels = 0\n        for i in range(num_levels):\n            roi_feats_t = self.roi_layers[i](feats[i], rois)\n            end_channels = start_channels + roi_feats_t.size(1)\n            if self.with_pre:\n                # apply pre-processing to a RoI extracted from each layer\n                roi_feats_t = self.pre_module(roi_feats_t)\n            if self.aggregation == 'sum':\n                # and sum them all\n                roi_feats += roi_feats_t\n            else:\n                # and concat them along channel dimension\n                roi_feats[:, start_channels:end_channels] = roi_feats_t\n            # update channels 
starting position\n            start_channels = end_channels\n        # check if concat channels match at the end\n        if self.aggregation == 'concat':\n            assert start_channels == self.out_channels\n\n        if self.with_post:\n            # apply post-processing before returning the result\n            roi_feats = self.post_module(roi_feats)\n        return roi_feats\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptMultiConfig\nfrom .base_roi_extractor import BaseRoIExtractor\n\n\n@MODELS.register_module()\nclass SingleRoIExtractor(BaseRoIExtractor):\n    \"\"\"Extract RoI features from a single level feature map.\n\n    If there are multiple input feature levels, each RoI is mapped to a level\n    according to its scale. The mapping rule is proposed in\n    `FPN <https://arxiv.org/abs/1612.03144>`_.\n\n    Args:\n        roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and\n            arguments.\n        out_channels (int): Output channels of RoI layers.\n        featmap_strides (List[int]): Strides of input feature maps.\n        finest_scale (int): Scale threshold of mapping to level 0.\n            Defaults to 56.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict], optional): Initialization config dict. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 roi_layer: ConfigType,\n                 out_channels: int,\n                 featmap_strides: List[int],\n                 finest_scale: int = 56,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            roi_layer=roi_layer,\n            out_channels=out_channels,\n            featmap_strides=featmap_strides,\n            init_cfg=init_cfg)\n        self.finest_scale = finest_scale\n\n    def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:\n        \"\"\"Map rois to corresponding feature levels by scales.\n\n        - scale < finest_scale * 2: level 0\n        - finest_scale * 2 <= scale < finest_scale * 4: level 1\n        - finest_scale * 4 <= scale < finest_scale * 8: level 2\n        - scale >= finest_scale * 8: level 3\n\n        Args:\n            rois (Tensor): Input RoIs, shape (k, 5).\n            num_levels (int): Total level number.\n\n        Returns:\n            Tensor: Level index (0-based) of each RoI, shape (k, )\n        \"\"\"\n        scale = torch.sqrt(\n            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))\n        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))\n        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()\n        return target_lvls\n\n    def forward(self,\n                feats: Tuple[Tensor],\n                rois: Tensor,\n                roi_scale_factor: Optional[float] = None):\n        \"\"\"Extractor ROI feats.\n\n        Args:\n            feats (Tuple[Tensor]): Multi-scale features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            roi_scale_factor (Optional[float]): RoI scale factor.\n                Defaults to None.\n\n        Returns:\n            Tensor: RoI feature.\n        \"\"\"\n        # convert fp32 to fp16 when amp is on\n        rois = rois.type_as(feats[0])\n        out_size = self.roi_layers[0].output_size\n        num_levels = len(feats)\n        roi_feats = feats[0].new_zeros(\n            rois.size(0), self.out_channels, *out_size)\n\n        # TODO: remove this when parrots supports\n        if torch.__version__ == 'parrots':\n            roi_feats.requires_grad = True\n\n        if num_levels == 1:\n            if len(rois) == 0:\n                return roi_feats\n            
return self.roi_layers[0](feats[0], rois)\n\n        target_lvls = self.map_roi_levels(rois, num_levels)\n\n        if roi_scale_factor is not None:\n            rois = self.roi_rescale(rois, roi_scale_factor)\n\n        for i in range(num_levels):\n            mask = target_lvls == i\n            inds = mask.nonzero(as_tuple=False).squeeze(1)\n            if inds.numel() > 0:\n                rois_ = rois[inds]\n                roi_feats_t = self.roi_layers[i](feats[i], rois_)\n                roi_feats[inds] = roi_feats_t\n            else:\n                # Sometimes some pyramid levels will not be used for RoI\n                # feature extraction and this will cause an incomplete\n                # computation graph in one GPU, which is different from those\n                # in other GPUs and will cause a hanging error.\n                # Therefore, we add it to ensure each feature pyramid is\n                # included in the computation graph to avoid runtime bugs.\n                roi_feats += sum(\n                    x.view(-1)[0]\n                    for x in self.parameters()) * 0. + feats[i].sum() * 0.\n        return roi_feats\n"
  },
  {
    "path": "mmdet/models/roi_heads/scnet_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType\nfrom ..layers import adaptive_avg_pool2d\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils import empty_instances, unpack_gt_instances\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@MODELS.register_module()\nclass SCNetRoIHead(CascadeRoIHead):\n    \"\"\"RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        num_stages (int): number of cascade stages.\n        stage_loss_weights (list): loss weight of cascade stages.\n        semantic_roi_extractor (dict): config to init semantic roi extractor.\n        semantic_head (dict): config to init semantic head.\n        feat_relay_head (dict): config to init feature_relay_head.\n        glbctx_head (dict): config to init global context head.\n    \"\"\"\n\n    def __init__(self,\n                 num_stages: int,\n                 stage_loss_weights: List[float],\n                 semantic_roi_extractor: OptConfigType = None,\n                 semantic_head: OptConfigType = None,\n                 feat_relay_head: OptConfigType = None,\n                 glbctx_head: OptConfigType = None,\n                 **kwargs) -> None:\n        super().__init__(\n            num_stages=num_stages,\n            stage_loss_weights=stage_loss_weights,\n            **kwargs)\n        assert self.with_bbox and self.with_mask\n        assert not self.with_shared_head  # shared head is not supported\n\n        if semantic_head is not None:\n            self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor)\n            self.semantic_head = MODELS.build(semantic_head)\n\n        if feat_relay_head is not None:\n            self.feat_relay_head = MODELS.build(feat_relay_head)\n\n        if glbctx_head is not None:\n            self.glbctx_head = MODELS.build(glbctx_head)\n\n    def init_mask_head(self, mask_roi_extractor: ConfigType,\n                       mask_head: ConfigType) -> None:\n        \"\"\"Initialize ``mask_head``\"\"\"\n        if mask_roi_extractor is not None:\n            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)\n            self.mask_head = MODELS.build(mask_head)\n\n    # TODO move to base_roi_head later\n    @property\n    def with_semantic(self) -> bool:\n        \"\"\"bool: whether the head has semantic head\"\"\"\n        return hasattr(self,\n                       'semantic_head') and self.semantic_head is not None\n\n    @property\n    def with_feat_relay(self) -> bool:\n        \"\"\"bool: whether the head has feature relay head\"\"\"\n        return (hasattr(self, 'feat_relay_head')\n                and self.feat_relay_head is not None)\n\n    @property\n    def with_glbctx(self) -> bool:\n        \"\"\"bool: whether the head has global context head\"\"\"\n        return hasattr(self, 'glbctx_head') and self.glbctx_head is not None\n\n    def _fuse_glbctx(self, roi_feats: Tensor, glbctx_feat: Tensor,\n                     rois: Tensor) -> Tensor:\n        \"\"\"Fuse global context feats with roi feats.\n\n        Args:\n            roi_feats (Tensor): RoI features.\n            glbctx_feat (Tensor): Global context feature..\n            rois (Tensor): RoIs 
with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n            Tensor: Fused feature.\n        \"\"\"\n        assert roi_feats.size(0) == rois.size(0)\n        # RuntimeError: isDifferentiableType(variable.scalar_type())\n        # INTERNAL ASSERT FAILED if detach() is not used when calling\n        # roi_head.predict().\n        img_inds = torch.unique(rois[:, 0].detach().cpu(), sorted=True).long()\n        fused_feats = torch.zeros_like(roi_feats)\n        for img_id in img_inds:\n            inds = (rois[:, 0] == img_id.item())\n            fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]\n        return fused_feats\n\n    def _slice_pos_feats(self, feats: Tensor,\n                         sampling_results: List[SamplingResult]) -> Tensor:\n        \"\"\"Get features from pos rois.\n\n        Args:\n            feats (Tensor): Input features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            Tensor: Sliced features.\n        \"\"\"\n        num_rois = [res.priors.size(0) for res in sampling_results]\n        num_pos_rois = [res.pos_priors.size(0) for res in sampling_results]\n        inds = torch.zeros(sum(num_rois), dtype=torch.bool)\n        start = 0\n        for i in range(len(num_rois)):\n            start = 0 if i == 0 else start + num_rois[i - 1]\n            stop = start + num_pos_rois[i]\n            inds[start:stop] = 1\n        sliced_feats = feats[inds]\n        return sliced_feats\n\n    def _bbox_forward(self,\n                      stage: int,\n                      x: Tuple[Tensor],\n                      rois: Tensor,\n                      semantic_feat: Optional[Tensor] = None,\n                      glbctx_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            semantic_feat (Tensor): Semantic feature. Defaults to None.\n            glbctx_feat (Tensor): Global context feature. 
Defaults to None.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extracted bbox RoI features.\n        \"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        if self.with_semantic and semantic_feat is not None:\n            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:\n                bbox_semantic_feat = adaptive_avg_pool2d(\n                    bbox_semantic_feat, bbox_feats.shape[-2:])\n            bbox_feats += bbox_semantic_feat\n        if self.with_glbctx and glbctx_feat is not None:\n            bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)\n        cls_score, bbox_pred, relayed_feat = bbox_head(\n            bbox_feats, return_shared_feat=True)\n\n        bbox_results = dict(\n            cls_score=cls_score,\n            bbox_pred=bbox_pred,\n            relayed_feat=relayed_feat)\n        return bbox_results\n\n    def _mask_forward(self,\n                      x: Tuple[Tensor],\n                      rois: Tensor,\n                      semantic_feat: Optional[Tensor] = None,\n                      glbctx_feat: Optional[Tensor] = None,\n                      relayed_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Mask head forward function used in both training and testing.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            semantic_feat (Tensor): Semantic feature. Defaults to None.\n            glbctx_feat (Tensor): Global context feature. Defaults to None.\n            relayed_feat (Tensor): Relayed feature. 
Defaults to None.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n        \"\"\"\n        mask_feats = self.mask_roi_extractor(\n            x[:self.mask_roi_extractor.num_inputs], rois)\n        if self.with_semantic and semantic_feat is not None:\n            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:\n                mask_semantic_feat = F.adaptive_avg_pool2d(\n                    mask_semantic_feat, mask_feats.shape[-2:])\n            mask_feats += mask_semantic_feat\n        if self.with_glbctx and glbctx_feat is not None:\n            mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)\n        if self.with_feat_relay and relayed_feat is not None:\n            mask_feats = mask_feats + relayed_feat\n        mask_preds = self.mask_head(mask_feats)\n        mask_results = dict(mask_preds=mask_preds)\n\n        return mask_results\n\n    def bbox_loss(self,\n                  stage: int,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  semantic_feat: Optional[Tensor] = None,\n                  glbctx_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Run forward function and calculate loss for box head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            semantic_feat (Tensor): Semantic feature. Defaults to None.\n            glbctx_feat (Tensor): Global context feature. Defaults to None.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n                - `loss_bbox` (dict): A dictionary of bbox loss components.\n                - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n                  column indicates batch id of each RoI.\n                - `bbox_targets` (tuple):  Ground truth for proposals in a\n                  single image. 
Containing the following list of Tensors:\n                  (labels, label_weights, bbox_targets, bbox_weights)\n        \"\"\"\n        bbox_head = self.bbox_head[stage]\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(\n            stage,\n            x,\n            rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat)\n        bbox_results.update(rois=rois)\n\n        bbox_loss_and_target = bbox_head.loss_and_target(\n            cls_score=bbox_results['cls_score'],\n            bbox_pred=bbox_results['bbox_pred'],\n            rois=rois,\n            sampling_results=sampling_results,\n            rcnn_train_cfg=self.train_cfg[stage])\n\n        bbox_results.update(bbox_loss_and_target)\n        return bbox_results\n\n    def mask_loss(self,\n                  x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult],\n                  batch_gt_instances: InstanceList,\n                  semantic_feat: Optional[Tensor] = None,\n                  glbctx_feat: Optional[Tensor] = None,\n                  relayed_feat: Optional[Tensor] = None) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head in training.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            semantic_feat (Tensor): Semantic feature. Defaults to None.\n            glbctx_feat (Tensor): Global context feature. Defaults to None.\n            relayed_feat (Tensor): Relayed feature. Defaults to None.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n        mask_results = self._mask_forward(\n            x,\n            pos_rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat,\n            relayed_feat=relayed_feat)\n\n        mask_loss_and_target = self.mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg[-1])\n        mask_results.update(mask_loss_and_target)\n\n        return mask_results\n\n    def semantic_loss(self, x: Tuple[Tensor],\n                      batch_data_samples: SampleList) -> dict:\n        \"\"\"Semantic segmentation loss.\n\n        Args:\n            x (Tuple[Tensor]): Tuple of multi-level img features.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `semantic_feat` (Tensor): Semantic feature.\n                - `loss_seg` (dict): Semantic segmentation loss.\n        \"\"\"\n        gt_semantic_segs = [\n            data_sample.gt_sem_seg.sem_seg\n            for data_sample in batch_data_samples\n        ]\n        gt_semantic_segs = torch.stack(gt_semantic_segs)\n        semantic_pred, semantic_feat = self.semantic_head(x)\n        loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs)\n\n        semantic_results = dict(loss_seg=loss_seg, semantic_feat=semantic_feat)\n\n        return semantic_results\n\n    def global_context_loss(self, x: Tuple[Tensor],\n                            batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Global context loss.\n\n        Args:\n            x (Tuple[Tensor]): Tuple of multi-level img features.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `glbctx_feat` (Tensor): Global context feature.\n                - `loss_glbctx` (dict): Global context loss.\n        \"\"\"\n        gt_labels = [\n            gt_instances.labels for gt_instances in batch_gt_instances\n        ]\n        mc_pred, glbctx_feat = self.glbctx_head(x)\n        loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)\n        global_context_results = dict(\n            loss_glbctx=loss_glbctx, glbctx_feat=glbctx_feat)\n\n        return global_context_results\n\n    def loss(self, x: Tensor, rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        losses = dict()\n\n        # semantic segmentation branch\n        if self.with_semantic:\n            semantic_results = self.semantic_loss(\n                x=x, batch_data_samples=batch_data_samples)\n            losses['loss_semantic_seg'] = semantic_results['loss_seg']\n            semantic_feat = semantic_results['semantic_feat']\n        else:\n            semantic_feat = None\n\n        # global context branch\n        if self.with_glbctx:\n            global_context_results = self.global_context_loss(\n                x=x, batch_gt_instances=batch_gt_instances)\n            losses['loss_glbctx'] = global_context_results['loss_glbctx']\n            glbctx_feat = global_context_results['glbctx_feat']\n        else:\n            glbctx_feat = None\n\n        results_list = rpn_results_list\n        num_imgs = len(batch_img_metas)\n        for stage in range(self.num_stages):\n            stage_loss_weight = self.stage_loss_weights[stage]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            bbox_assigner = self.bbox_assigner[stage]\n            bbox_sampler = self.bbox_sampler[stage]\n            for i in range(num_imgs):\n                results = results_list[i]\n                # rename rpn_results.bboxes to rpn_results.priors\n                results.priors = results.pop('bboxes')\n\n                assign_result = bbox_assigner.assign(\n                    results, batch_gt_instances[i],\n                    batch_gt_instances_ignore[i])\n                sampling_result = bbox_sampler.sample(\n                    assign_result,\n                    results,\n                    batch_gt_instances[i],\n                    feats=[lvl_feat[i][None] for lvl_feat in x])\n                sampling_results.append(sampling_result)\n\n            # bbox head forward and loss\n            bbox_results = self.bbox_loss(\n                stage=stage,\n                x=x,\n                sampling_results=sampling_results,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat)\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{stage}.{name}'] = (\n                    value * stage_loss_weight if 'loss' in name else value)\n\n            # refine bboxes\n            if stage < self.num_stages - 1:\n                bbox_head = self.bbox_head[stage]\n                with torch.no_grad():\n                    results_list = bbox_head.refine_bboxes(\n                        sampling_results=sampling_results,\n                        bbox_results=bbox_results,\n                        batch_img_metas=batch_img_metas)\n\n        if self.with_feat_relay:\n            relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],\n                                                 sampling_results)\n            relayed_feat = self.feat_relay_head(relayed_feat)\n        else:\n            relayed_feat = None\n\n        # mask head forward and loss\n        mask_results = self.mask_loss(\n            x=x,\n            
sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat,\n            relayed_feat=relayed_feat)\n        mask_stage_loss_weight = sum(self.stage_loss_weights)\n        losses['loss_mask'] = mask_stage_loss_weight * mask_results[\n            'loss_mask']['loss_mask']\n\n        return losses\n\n    def predict(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the roi head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (N, C, H, W).\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results to\n                the original image. Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        if self.with_glbctx:\n            _, glbctx_feat = self.glbctx_head(x)\n        else:\n            glbctx_feat = None\n\n        # TODO: nms_op in mmcv needs to be enhanced; the bbox result may\n        #  differ when rescale is not applied in bbox_head\n\n        # If it has the mask branch, the bbox branch does not need\n        # to be scaled to the original image scale, because the mask\n        # branch will scale both bbox and mask at the same time.\n        bbox_rescale = rescale if not self.with_mask else False\n        results_list = self.predict_bbox(\n            x=x,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat,\n            batch_img_metas=batch_img_metas,\n            rpn_results_list=rpn_results_list,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=bbox_rescale)\n\n        if self.with_mask:\n            results_list = self.predict_mask(\n                x=x,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat,\n                batch_img_metas=batch_img_metas,\n                results_list=results_list,\n                rescale=rescale)\n\n        return results_list\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     semantic_feat: Tensor,\n                     glbctx_feat: 
Tensor,\n                     batch_img_metas: List[dict],\n                     results_list: List[InstanceData],\n                     rescale: bool = False) -> List[InstanceData]:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            semantic_feat (Tensor): Semantic feature.\n            glbctx_feat (Tensor): Global context feature.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas=batch_img_metas,\n                device=mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        bboxes_results = self._bbox_forward(\n            stage=-1,\n            x=x,\n            rois=mask_rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat)\n        relayed_feat = bboxes_results['relayed_feat']\n        relayed_feat = self.feat_relay_head(relayed_feat)\n\n        mask_results = self._mask_forward(\n            x=x,\n            rois=mask_rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat,\n            relayed_feat=relayed_feat)\n        mask_preds = mask_results['mask_preds']\n\n        # split batch mask prediction back to each image\n        num_bbox_per_img = tuple(len(_bbox) for _bbox in bboxes)\n        mask_preds = mask_preds.split(num_bbox_per_img, 0)\n\n        results_list = self.mask_head.predict_by_feat(\n            mask_preds=mask_preds,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale)\n\n        return results_list\n\n    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n                batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        if self.with_glbctx:\n            _, glbctx_feat = self.glbctx_head(x)\n        else:\n            glbctx_feat = None\n\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            rois, cls_scores, bbox_preds = self._refine_roi(\n                x=x,\n                rois=rois,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat,\n                batch_img_metas=batch_img_metas,\n                num_proposals_per_img=num_proposals_per_img)\n            results = results + (cls_scores, bbox_preds)\n        # mask head\n        if self.with_mask:\n            rois = torch.cat(rois)\n            bboxes_results = self._bbox_forward(\n                stage=-1,\n                x=x,\n                rois=rois,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat)\n            relayed_feat = bboxes_results['relayed_feat']\n            relayed_feat = self.feat_relay_head(relayed_feat)\n            mask_results = self._mask_forward(\n                x=x,\n                rois=rois,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat,\n                relayed_feat=relayed_feat)\n            mask_preds = mask_results['mask_preds']\n            mask_preds = mask_preds.split(num_proposals_per_img, 0)\n            results = results + (mask_preds, )\n        return results\n"
  },
  {
    "path": "mmdet/models/roi_heads/shared_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .res_layer import ResLayer\n\n__all__ = ['ResLayer']\n"
  },
  {
    "path": "mmdet/models/roi_heads/shared_heads/res_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmengine.model import BaseModule\n\nfrom mmdet.models.backbones import ResNet\nfrom mmdet.models.layers import ResLayer as _ResLayer\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass ResLayer(BaseModule):\n\n    def __init__(self,\n                 depth,\n                 stage=3,\n                 stride=2,\n                 dilation=1,\n                 style='pytorch',\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 with_cp=False,\n                 dcn=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResLayer, self).__init__(init_cfg)\n\n        self.norm_eval = norm_eval\n        self.norm_cfg = norm_cfg\n        self.stage = stage\n        self.fp16_enabled = False\n        block, stage_blocks = ResNet.arch_settings[depth]\n        stage_block = stage_blocks[stage]\n        planes = 64 * 2**stage\n        inplanes = 64 * 2**(stage - 1) * block.expansion\n\n        res_layer = _ResLayer(\n            block,\n            inplanes,\n            planes,\n            stage_block,\n            stride=stride,\n            dilation=dilation,\n            style=style,\n            with_cp=with_cp,\n            norm_cfg=self.norm_cfg,\n            dcn=dcn)\n        self.add_module(f'layer{stage + 1}', res_layer)\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is a deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def forward(self, x):\n        res_layer = getattr(self, f'layer{self.stage + 1}')\n        out = res_layer(x)\n        return out\n\n    def train(self, mode=True):\n        super(ResLayer, self).train(mode)\n        if self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eval()\n"
  },
  {
    "path": "mmdet/models/roi_heads/sparse_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules.samplers import PseudoSampler\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList, OptConfigType\nfrom ..utils.misc import empty_instances, unpack_gt_instances\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@MODELS.register_module()\nclass SparseRoIHead(CascadeRoIHead):\n    r\"\"\"The RoIHead for `Sparse R-CNN: End-to-End Object Detection with\n    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\n    and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\n\n    Args:\n        num_stages (int): Number of stage whole iterative process.\n            Defaults to 6.\n        stage_loss_weights (Tuple[float]): The loss\n            weight of each stage. By default all stages have\n            the same weight 1.\n        bbox_roi_extractor (:obj:`ConfigDict` or dict): Config of box\n            roi extractor.\n        mask_roi_extractor (:obj:`ConfigDict` or dict): Config of mask\n            roi extractor.\n        bbox_head (:obj:`ConfigDict` or dict): Config of box head.\n        mask_head (:obj:`ConfigDict` or dict): Config of mask head.\n        train_cfg (:obj:`ConfigDict` or dict, Optional): Configuration\n            information in train stage. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, Optional): Configuration\n            information in test stage. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \\\n            dict]): Initialization config dict. 
Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 num_stages: int = 6,\n                 stage_loss_weights: Tuple[float] = (1, 1, 1, 1, 1, 1),\n                 proposal_feature_channel: int = 256,\n                 bbox_roi_extractor: ConfigType = dict(\n                     type='SingleRoIExtractor',\n                     roi_layer=dict(\n                         type='RoIAlign', output_size=7, sampling_ratio=2),\n                     out_channels=256,\n                     featmap_strides=[4, 8, 16, 32]),\n                 mask_roi_extractor: OptConfigType = None,\n                 bbox_head: ConfigType = dict(\n                     type='DIIHead',\n                     num_classes=80,\n                     num_fcs=2,\n                     num_heads=8,\n                     num_cls_fcs=1,\n                     num_reg_fcs=3,\n                     feedforward_channels=2048,\n                     hidden_channels=256,\n                     dropout=0.0,\n                     roi_feat_size=7,\n                     ffn_act_cfg=dict(type='ReLU', inplace=True)),\n                 mask_head: OptConfigType = None,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptConfigType = None) -> None:\n        assert bbox_roi_extractor is not None\n        assert bbox_head is not None\n        assert len(stage_loss_weights) == num_stages\n        self.num_stages = num_stages\n        self.stage_loss_weights = stage_loss_weights\n        self.proposal_feature_channel = proposal_feature_channel\n        super().__init__(\n            num_stages=num_stages,\n            stage_loss_weights=stage_loss_weights,\n            bbox_roi_extractor=bbox_roi_extractor,\n            mask_roi_extractor=mask_roi_extractor,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg)\n        # train_cfg would be None when run the test.py\n        if train_cfg is not None:\n            for stage in range(num_stages):\n                assert isinstance(self.bbox_sampler[stage], PseudoSampler), \\\n                    'Sparse R-CNN and QueryInst only support `PseudoSampler`'\n\n    def bbox_loss(self, stage: int, x: Tuple[Tensor],\n                  results_list: InstanceList, object_feats: Tensor,\n                  batch_img_metas: List[dict],\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n            stage (int): The current stage in iterative process.\n            x (tuple[Tensor]): List of multi-level img features.\n            results_list (List[:obj:`InstanceData`]) : List of region\n                proposals.\n            object_feats (Tensor): The object feature extracted from\n                the previous stage.\n            batch_img_metas (list[dict]): Meta information of each image.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n            - `cls_score` (Tensor): Classification scores.\n            - `bbox_pred` (Tensor): Box energies / deltas.\n            - `bbox_feats` (Tensor): Extracted bbox RoI features.\n            - `loss_bbox` (dict): A dictionary of bbox loss components.\n        \"\"\"\n        proposal_list = [res.bboxes for res in results_list]\n        rois = bbox2roi(proposal_list)\n        bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n                                          batch_img_metas)\n        imgs_whwh = torch.cat(\n            [res.imgs_whwh[None, ...] for res in results_list])\n        cls_pred_list = bbox_results['detached_cls_scores']\n        proposal_list = bbox_results['detached_proposals']\n\n        sampling_results = []\n        bbox_head = self.bbox_head[stage]\n        for i in range(len(batch_img_metas)):\n            pred_instances = InstanceData()\n            # TODO: Enhance the logic\n            pred_instances.bboxes = proposal_list[i]  # for assigner\n            pred_instances.scores = cls_pred_list[i]\n            pred_instances.priors = proposal_list[i]  # for sampler\n\n            assign_result = self.bbox_assigner[stage].assign(\n                pred_instances=pred_instances,\n                gt_instances=batch_gt_instances[i],\n                gt_instances_ignore=None,\n                img_meta=batch_img_metas[i])\n\n            sampling_result = self.bbox_sampler[stage].sample(\n                assign_result, pred_instances, batch_gt_instances[i])\n            sampling_results.append(sampling_result)\n\n        bbox_results.update(sampling_results=sampling_results)\n\n        cls_score = bbox_results['cls_score']\n        decoded_bboxes = bbox_results['decoded_bboxes']\n        cls_score = cls_score.view(-1, cls_score.size(-1))\n        decoded_bboxes = decoded_bboxes.view(-1, 4)\n        bbox_loss_and_target = bbox_head.loss_and_target(\n            cls_score,\n            decoded_bboxes,\n            sampling_results,\n            self.train_cfg[stage],\n            imgs_whwh=imgs_whwh,\n            concat=True)\n        bbox_results.update(bbox_loss_and_target)\n\n        # build the new proposal_list for the next stage\n        proposal_list = []\n        for idx in range(len(batch_img_metas)):\n            results = InstanceData()\n            results.imgs_whwh = results_list[idx].imgs_whwh\n            results.bboxes = bbox_results['detached_proposals'][idx]\n            proposal_list.append(results)\n        bbox_results.update(results_list=proposal_list)\n        return bbox_results\n\n    def _bbox_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,\n                      object_feats: Tensor,\n                      batch_img_metas: List[dict]) -> dict:\n        \"\"\"Box head forward function used in both training and testing. 
Returns\n        all regression, classification results and an intermediate feature.\n\n        Args:\n            stage (int): The current stage in iterative process.\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n                Each dimension means (img_index, x1, y1, x2, y2).\n            object_feats (Tensor): The object feature extracted from\n                the previous stage.\n            batch_img_metas (list[dict]): Meta information of each image.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of bbox head outputs,\n            containing the following results:\n\n            - cls_score (Tensor): The score of each class, has\n              shape (batch_size, num_proposals, num_classes)\n              when using focal loss or\n              (batch_size, num_proposals, num_classes+1)\n              otherwise.\n            - decoded_bboxes (Tensor): The regression results\n              with shape (batch_size, num_proposal, 4).\n              The last dimension 4 represents\n              [tl_x, tl_y, br_x, br_y].\n            - object_feats (Tensor): The object feature extracted\n              from the current stage.\n            - detached_cls_scores (list[Tensor]): The detached\n              classification results, length is batch_size, and\n              each tensor has shape (num_proposal, num_classes).\n            - detached_proposals (list[Tensor]): The detached\n              regression results, length is batch_size, and each\n              tensor has shape (num_proposal, 4). The last\n              dimension 4 represents [tl_x, tl_y, br_x, br_y].\n        \"\"\"\n        num_imgs = len(batch_img_metas)\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        cls_score, bbox_pred, object_feats, attn_feats = bbox_head(\n            bbox_feats, object_feats)\n\n        fake_bbox_results = dict(\n            rois=rois,\n            bbox_targets=(rois.new_zeros(len(rois), dtype=torch.long), None),\n            bbox_pred=bbox_pred.view(-1, bbox_pred.size(-1)),\n            cls_score=cls_score.view(-1, cls_score.size(-1)))\n        fake_sampling_results = [\n            InstanceData(pos_is_gt=rois.new_zeros(object_feats.size(1)))\n            for _ in range(len(batch_img_metas))\n        ]\n\n        results_list = bbox_head.refine_bboxes(\n            sampling_results=fake_sampling_results,\n            bbox_results=fake_bbox_results,\n            batch_img_metas=batch_img_metas)\n        proposal_list = [res.bboxes for res in results_list]\n        bbox_results = dict(\n            cls_score=cls_score,\n            decoded_bboxes=torch.cat(proposal_list),\n            object_feats=object_feats,\n            attn_feats=attn_feats,\n            # detach then use it in label assign\n            detached_cls_scores=[\n                cls_score[i].detach() for i in range(num_imgs)\n            ],\n            detached_proposals=[item.detach() for item in proposal_list])\n\n        return bbox_results\n\n    def _mask_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,\n                      attn_feats) -> dict:\n        \"\"\"Mask head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current 
stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            attn_feats (Tensor): Intermediate feature obtained from the last\n                DIIHead, has shape\n                (batch_size*num_proposals, feature_dimensions).\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n            - `mask_preds` (Tensor): Mask prediction.\n        \"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        mask_preds = mask_head(mask_feats, attn_feats)\n\n        mask_results = dict(mask_preds=mask_preds)\n        return mask_results\n\n    def mask_loss(self, stage: int, x: Tuple[Tensor], bbox_results: dict,\n                  batch_gt_instances: InstanceList,\n                  rcnn_train_cfg: ConfigDict) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head in training.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            bbox_results (dict): Results obtained from `bbox_loss`.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n            rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n            - `mask_preds` (Tensor): Mask prediction.\n            - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        attn_feats = bbox_results['attn_feats']\n        sampling_results = bbox_results['sampling_results']\n\n        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n\n        attn_feats = torch.cat([\n            feats[res.pos_inds]\n            for (feats, res) in zip(attn_feats, sampling_results)\n        ])\n        mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)\n\n        mask_loss_and_target = self.mask_head[stage].loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=rcnn_train_cfg)\n        mask_results.update(mask_loss_and_target)\n\n        return mask_results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (List[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: a dictionary of loss components of all stage.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n            = outputs\n\n        object_feats = torch.cat(\n            [res.pop('features')[None, ...] for res in rpn_results_list])\n        results_list = rpn_results_list\n        losses = {}\n        for stage in range(self.num_stages):\n            stage_loss_weight = self.stage_loss_weights[stage]\n\n            # bbox head forward and loss\n            bbox_results = self.bbox_loss(\n                stage=stage,\n                x=x,\n                object_feats=object_feats,\n                results_list=results_list,\n                batch_img_metas=batch_img_metas,\n                batch_gt_instances=batch_gt_instances)\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{stage}.{name}'] = (\n                    value * stage_loss_weight if 'loss' in name else value)\n\n            if self.with_mask:\n                mask_results = self.mask_loss(\n                    stage=stage,\n                    x=x,\n                    bbox_results=bbox_results,\n                    batch_gt_instances=batch_gt_instances,\n                    rcnn_train_cfg=self.train_cfg[stage])\n\n                for name, value in mask_results['loss_mask'].items():\n                    losses[f's{stage}.{name}'] = (\n                        value * stage_loss_weight if 'loss' in name else value)\n\n            object_feats = bbox_results['object_feats']\n            results_list = bbox_results['results_list']\n        return losses\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x(tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        proposal_list = [res.bboxes for res in rpn_results_list]\n        object_feats = torch.cat(\n            [res.pop('features')[None, ...] 
for res in rpn_results_list])\n        if all([proposal.shape[0] == 0 for proposal in proposal_list]):\n            # There is no proposal in the whole batch\n            return empty_instances(\n                batch_img_metas, x[0].device, task_type='bbox')\n\n        for stage in range(self.num_stages):\n            rois = bbox2roi(proposal_list)\n            bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n                                              batch_img_metas)\n            object_feats = bbox_results['object_feats']\n            cls_score = bbox_results['cls_score']\n            proposal_list = bbox_results['detached_proposals']\n\n        num_classes = self.bbox_head[-1].num_classes\n\n        if self.bbox_head[-1].loss_cls.use_sigmoid:\n            cls_score = cls_score.sigmoid()\n        else:\n            cls_score = cls_score.softmax(-1)[..., :-1]\n\n        topk_inds_list = []\n        results_list = []\n        for img_id in range(len(batch_img_metas)):\n            cls_score_per_img = cls_score[img_id]\n            scores_per_img, topk_inds = cls_score_per_img.flatten(0, 1).topk(\n                self.test_cfg.max_per_img, sorted=False)\n            labels_per_img = topk_inds % num_classes\n            bboxes_per_img = proposal_list[img_id][topk_inds // num_classes]\n            topk_inds_list.append(topk_inds)\n            if rescale and bboxes_per_img.size(0) > 0:\n                assert batch_img_metas[img_id].get('scale_factor') is not None\n                scale_factor = bboxes_per_img.new_tensor(\n                    batch_img_metas[img_id]['scale_factor']).repeat((1, 2))\n                bboxes_per_img = (\n                    bboxes_per_img.view(bboxes_per_img.size(0), -1, 4) /\n                    scale_factor).view(bboxes_per_img.size()[0], -1)\n\n            results = InstanceData()\n            results.bboxes = bboxes_per_img\n            results.scores = scores_per_img\n            results.labels = labels_per_img\n            results_list.append(results)\n        if self.with_mask:\n            for img_id in range(len(batch_img_metas)):\n                # add positive information in InstanceData to predict\n                # mask results in `mask_head`.\n                proposals = bbox_results['detached_proposals'][img_id]\n                topk_inds = topk_inds_list[img_id]\n                attn_feats = bbox_results['attn_feats'][img_id]\n\n                results_list[img_id].proposals = proposals\n                results_list[img_id].topk_inds = topk_inds\n                results_list[img_id].attn_feats = attn_feats\n        return results_list\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     results_list: InstanceList,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image. 
Each item usually contains following keys:\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - proposals (Tensor): Bboxes predicted from bbox_head,\n                  has a shape (num_instances, 4).\n                - topk_inds (Tensor): Topk indices of each image, has\n                  shape (num_instances, )\n                - attn_feats (Tensor): Intermediate feature obtained from the\n                  last DIIHead, has shape (num_instances, feature_dimensions)\n\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n            - scores (Tensor): Classification scores, has a shape\n              (num_instance, )\n            - labels (Tensor): Labels of bboxes, has a shape\n              (num_instances, ).\n            - bboxes (Tensor): Has a shape (num_instances, 4),\n              the last dimension 4 arrange as (x1, y1, x2, y2).\n            - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        proposal_list = [res.pop('proposals') for res in results_list]\n        topk_inds_list = [res.pop('topk_inds') for res in results_list]\n        attn_feats = torch.cat(\n            [res.pop('attn_feats')[None, ...] for res in results_list])\n\n        rois = bbox2roi(proposal_list)\n\n        if rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        last_stage = self.num_stages - 1\n        mask_results = self._mask_forward(last_stage, x, rois, attn_feats)\n\n        num_imgs = len(batch_img_metas)\n        mask_results['mask_preds'] = mask_results['mask_preds'].reshape(\n            num_imgs, -1, *mask_results['mask_preds'].size()[1:])\n        num_classes = self.bbox_head[-1].num_classes\n\n        mask_preds = []\n        for img_id in range(num_imgs):\n            topk_inds = topk_inds_list[img_id]\n            masks_per_img = mask_results['mask_preds'][img_id].flatten(\n                0, 1)[topk_inds]\n            masks_per_img = masks_per_img[:, None,\n                                          ...].repeat(1, num_classes, 1, 1)\n            mask_preds.append(masks_per_img)\n        results_list = self.mask_head[-1].predict_by_feat(\n            mask_preds,\n            results_list,\n            batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale)\n\n        return results_list\n\n    # TODO: Need to refactor later\n    def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n                batch_data_samples: SampleList) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (List[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        outputs = unpack_gt_instances(batch_data_samples)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = outputs\n\n        all_stage_bbox_results = []\n        object_feats = torch.cat(\n            [res.pop('features')[None, ...] for res in rpn_results_list])\n        results_list = rpn_results_list\n        if self.with_bbox:\n            for stage in range(self.num_stages):\n                bbox_results = self.bbox_loss(\n                    stage=stage,\n                    x=x,\n                    results_list=results_list,\n                    object_feats=object_feats,\n                    batch_img_metas=batch_img_metas,\n                    batch_gt_instances=batch_gt_instances)\n                bbox_results.pop('loss_bbox')\n                # torch.jit does not support obj:SamplingResult\n                bbox_results.pop('results_list')\n                bbox_res = bbox_results.copy()\n                bbox_res.pop('sampling_results')\n                all_stage_bbox_results.append((bbox_res, ))\n\n                if self.with_mask:\n                    attn_feats = bbox_results['attn_feats']\n                    sampling_results = bbox_results['sampling_results']\n\n                    pos_rois = bbox2roi(\n                        [res.pos_priors for res in sampling_results])\n\n                    attn_feats = torch.cat([\n                        feats[res.pos_inds]\n                        for (feats, res) in zip(attn_feats, sampling_results)\n                    ])\n                    mask_results = self._mask_forward(stage, x, pos_rois,\n                                                      attn_feats)\n                    all_stage_bbox_results[-1] += (mask_results, )\n        return tuple(all_stage_bbox_results)\n"
  },
  {
    "path": "mmdet/models/roi_heads/standard_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import DetDataSample, SampleList\nfrom mmdet.structures.bbox import bbox2roi\nfrom mmdet.utils import ConfigType, InstanceList\nfrom ..task_modules.samplers import SamplingResult\nfrom ..utils import empty_instances, unpack_gt_instances\nfrom .base_roi_head import BaseRoIHead\n\n\n@MODELS.register_module()\nclass StandardRoIHead(BaseRoIHead):\n    \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n    def init_assigner_sampler(self) -> None:\n        \"\"\"Initialize assigner and sampler.\"\"\"\n        self.bbox_assigner = None\n        self.bbox_sampler = None\n        if self.train_cfg:\n            self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner)\n            self.bbox_sampler = TASK_UTILS.build(\n                self.train_cfg.sampler, default_args=dict(context=self))\n\n    def init_bbox_head(self, bbox_roi_extractor: ConfigType,\n                       bbox_head: ConfigType) -> None:\n        \"\"\"Initialize box head and box roi extractor.\n\n        Args:\n            bbox_roi_extractor (dict or ConfigDict): Config of box\n                roi extractor.\n            bbox_head (dict or ConfigDict): Config of box in box head.\n        \"\"\"\n        self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)\n        self.bbox_head = MODELS.build(bbox_head)\n\n    def init_mask_head(self, mask_roi_extractor: ConfigType,\n                       mask_head: ConfigType) -> None:\n        \"\"\"Initialize mask head and mask roi extractor.\n\n        Args:\n            mask_roi_extractor (dict or ConfigDict): Config of mask roi\n                extractor.\n            mask_head (dict or ConfigDict): Config of mask in mask head.\n        \"\"\"\n        if mask_roi_extractor is not None:\n            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)\n            self.share_roi_extractor = False\n        else:\n            self.share_roi_extractor = True\n            self.mask_roi_extractor = self.bbox_roi_extractor\n        self.mask_head = MODELS.build(mask_head)\n\n    # TODO: Need to refactor later\n    def forward(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList = None) -> tuple:\n        \"\"\"Network forward process. 
Usually includes backbone, neck and head\n        forward without any post-processing.\n\n        Args:\n            x (List[Tensor]): Multi-level features that may have different\n                resolutions.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): Each item contains\n                the meta information of each image and corresponding\n                annotations.\n\n        Returns:\n            tuple: A tuple of features from ``bbox_head`` and ``mask_head``\n            forward.\n        \"\"\"\n        results = ()\n        proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]\n        rois = bbox2roi(proposals)\n        # bbox head\n        if self.with_bbox:\n            bbox_results = self._bbox_forward(x, rois)\n            results = results + (bbox_results['cls_score'],\n                                 bbox_results['bbox_pred'])\n        # mask head\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_results = self._mask_forward(x, mask_rois)\n            results = results + (mask_results['mask_preds'], )\n        return results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: List[DetDataSample]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        assert len(rpn_results_list) == len(batch_data_samples)\n        outputs = unpack_gt_instances(batch_data_samples)\n        batch_gt_instances, batch_gt_instances_ignore, _ = outputs\n\n        # assign gts and sample proposals\n        num_imgs = len(batch_data_samples)\n        sampling_results = []\n        for i in range(num_imgs):\n            # rename rpn_results.bboxes to rpn_results.priors\n            rpn_results = rpn_results_list[i]\n            rpn_results.priors = rpn_results.pop('bboxes')\n\n            assign_result = self.bbox_assigner.assign(\n                rpn_results, batch_gt_instances[i],\n                batch_gt_instances_ignore[i])\n            sampling_result = self.bbox_sampler.sample(\n                assign_result,\n                rpn_results,\n                batch_gt_instances[i],\n                feats=[lvl_feat[i][None] for lvl_feat in x])\n            sampling_results.append(sampling_result)\n\n        losses = dict()\n        # bbox head loss\n        if self.with_bbox:\n            bbox_results = self.bbox_loss(x, sampling_results)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self.mask_loss(x, sampling_results,\n                                          bbox_results['bbox_feats'],\n                                          batch_gt_instances)\n            losses.update(mask_results['loss_mask'])\n\n        return losses\n\n    def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:\n    
    \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        # TODO: a more flexible way to decide which feature maps to use\n        bbox_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        if self.with_shared_head:\n            bbox_feats = self.shared_head(bbox_feats)\n        cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def bbox_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult]) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the bbox head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n                - `loss_bbox` (dict): A dictionary of bbox loss components.\n        \"\"\"\n        rois = bbox2roi([res.priors for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n\n        bbox_loss_and_target = self.bbox_head.loss_and_target(\n            cls_score=bbox_results['cls_score'],\n            bbox_pred=bbox_results['bbox_pred'],\n            rois=rois,\n            sampling_results=sampling_results,\n            rcnn_train_cfg=self.train_cfg)\n\n        bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])\n        return bbox_results\n\n    def mask_loss(self, x: Tuple[Tensor],\n                  sampling_results: List[SamplingResult], bbox_feats: Tensor,\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the mask head on\n        the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            bbox_feats (Tensor): Extract bbox RoI features.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `mask_feats` (Tensor): Extract mask RoI features.\n                - `mask_targets` (Tensor): Mask target of each positive\\\n                    proposals in the image.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        if not self.share_roi_extractor:\n            pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n            mask_results = self._mask_forward(x, pos_rois)\n        else:\n            pos_inds = []\n            device = bbox_feats.device\n            for res in sampling_results:\n                pos_inds.append(\n                    torch.ones(\n                        res.pos_priors.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n                pos_inds.append(\n                    torch.zeros(\n                        res.neg_priors.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n            pos_inds = torch.cat(pos_inds)\n\n            mask_results = self._mask_forward(\n                x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n        mask_loss_and_target = self.mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg)\n\n        mask_results.update(loss_mask=mask_loss_and_target['loss_mask'])\n        return mask_results\n\n    def _mask_forward(self,\n                      x: Tuple[Tensor],\n                      rois: Tensor = None,\n                      pos_inds: Optional[Tensor] = None,\n                      bbox_feats: Optional[Tensor] = None) -> dict:\n        \"\"\"Mask head forward function used in both training and testing.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n            pos_inds (Tensor, optional): Indices of positive samples.\n                Defaults to None.\n            bbox_feats (Tensor): Extract bbox RoI features. 
Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `mask_feats` (Tensor): Extract mask RoI features.\n        \"\"\"\n        assert ((rois is not None) ^\n                (pos_inds is not None and bbox_feats is not None))\n        if rois is not None:\n            mask_feats = self.mask_roi_extractor(\n                x[:self.mask_roi_extractor.num_inputs], rois)\n            if self.with_shared_head:\n                mask_feats = self.shared_head(mask_feats)\n        else:\n            assert bbox_feats is not None\n            mask_feats = bbox_feats[pos_inds]\n\n        mask_preds = self.mask_head(mask_feats)\n        mask_results = dict(mask_preds=mask_preds, mask_feats=mask_feats)\n        return mask_results\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        proposals = [res.bboxes for res in rpn_results_list]\n        rois = bbox2roi(proposals)\n\n        if rois.shape[0] == 0:\n            return empty_instances(\n                batch_img_metas,\n                rois.device,\n                task_type='bbox',\n                box_type=self.bbox_head.predict_box_type,\n                num_classes=self.bbox_head.num_classes,\n                score_per_cls=rcnn_test_cfg is None)\n\n        bbox_results = self._bbox_forward(x, rois)\n\n        # split batch bbox prediction back to each image\n        cls_scores = bbox_results['cls_score']\n        bbox_preds = bbox_results['bbox_pred']\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = rois.split(num_proposals_per_img, 0)\n        cls_scores = cls_scores.split(num_proposals_per_img, 0)\n\n        # some detector with_reg is False, bbox_preds will be None\n        if bbox_preds is not None:\n            # TODO move this to a sabl_roi_head\n            # the bbox prediction of some detectors like SABL is not Tensor\n            if isinstance(bbox_preds, torch.Tensor):\n                bbox_preds = bbox_preds.split(num_proposals_per_img, 0)\n            else:\n                bbox_preds = self.bbox_head.bbox_pred_split(\n       
             bbox_preds, num_proposals_per_img)\n        else:\n            bbox_preds = (None, ) * len(proposals)\n\n        result_list = self.bbox_head.predict_by_feat(\n            rois=rois,\n            cls_scores=cls_scores,\n            bbox_preds=bbox_preds,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=rcnn_test_cfg,\n            rescale=rescale)\n        return result_list\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     results_list: InstanceList,\n                     rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        # don't need to consider aug_test.\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        mask_results = self._mask_forward(x, mask_rois)\n        mask_preds = mask_results['mask_preds']\n        # split batch mask prediction back to each image\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n\n        # TODO: Handle the case where rescale is false\n        results_list = self.mask_head.predict_by_feat(\n            mask_preds=mask_preds,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale)\n        return results_list\n"
  },
  {
    "path": "mmdet/models/roi_heads/test_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# TODO: delete this file after refactor\nimport sys\n\nimport torch\n\nfrom mmdet.models.layers import multiclass_nms\nfrom mmdet.models.test_time_augs import merge_aug_bboxes, merge_aug_masks\nfrom mmdet.structures.bbox import bbox2roi, bbox_mapping\n\nif sys.version_info >= (3, 7):\n    from mmdet.utils.contextmanagers import completed\n\n\nclass BBoxTestMixin:\n\n    if sys.version_info >= (3, 7):\n        # TODO: Currently not supported\n        async def async_test_bboxes(self,\n                                    x,\n                                    img_metas,\n                                    proposals,\n                                    rcnn_test_cfg,\n                                    rescale=False,\n                                    **kwargs):\n            \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n            rois = bbox2roi(proposals)\n            roi_feats = self.bbox_roi_extractor(\n                x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n            if self.with_shared_head:\n                roi_feats = self.shared_head(roi_feats)\n            sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n            async with completed(\n                    __name__, 'bbox_head_forward',\n                    sleep_interval=sleep_interval):\n                cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n            img_shape = img_metas[0]['img_shape']\n            scale_factor = img_metas[0]['scale_factor']\n            det_bboxes, det_labels = self.bbox_head.get_bboxes(\n                rois,\n                cls_score,\n                bbox_pred,\n                img_shape,\n                scale_factor,\n                rescale=rescale,\n                cfg=rcnn_test_cfg)\n            return det_bboxes, det_labels\n\n    # TODO: Currently not supported\n    def aug_test_bboxes(self, feats, img_metas, rpn_results_list,\n                        rcnn_test_cfg):\n        \"\"\"Test det bboxes with test time augmentation.\"\"\"\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta in zip(feats, img_metas):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n            scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n            flip_direction = img_meta[0]['flip_direction']\n            # TODO more flexible\n            proposals = bbox_mapping(rpn_results_list[0][:, :4], img_shape,\n                                     scale_factor, flip, flip_direction)\n            rois = bbox2roi([proposals])\n            bbox_results = self.bbox_forward(x, rois)\n            bboxes, scores = self.bbox_head.get_bboxes(\n                rois,\n                bbox_results['cls_score'],\n                bbox_results['bbox_pred'],\n                img_shape,\n                scale_factor,\n                rescale=False,\n                cfg=None)\n            aug_bboxes.append(bboxes)\n            aug_scores.append(scores)\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        if merged_bboxes.shape[0] == 0:\n            # There is no proposal in the single image\n            det_bboxes = merged_bboxes.new_zeros(0, 5)\n            det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long)\n        else:\n            
det_bboxes, det_labels = multiclass_nms(merged_bboxes,\n                                                    merged_scores,\n                                                    rcnn_test_cfg.score_thr,\n                                                    rcnn_test_cfg.nms,\n                                                    rcnn_test_cfg.max_per_img)\n        return det_bboxes, det_labels\n\n\nclass MaskTestMixin:\n\n    if sys.version_info >= (3, 7):\n        # TODO: Currently not supported\n        async def async_test_mask(self,\n                                  x,\n                                  img_metas,\n                                  det_bboxes,\n                                  det_labels,\n                                  rescale=False,\n                                  mask_test_cfg=None):\n            \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n            # image shape of the first image in the batch (only one)\n            ori_shape = img_metas[0]['ori_shape']\n            scale_factor = img_metas[0]['scale_factor']\n            if det_bboxes.shape[0] == 0:\n                segm_result = [[] for _ in range(self.mask_head.num_classes)]\n            else:\n                if rescale and not isinstance(scale_factor,\n                                              (float, torch.Tensor)):\n                    scale_factor = det_bboxes.new_tensor(scale_factor)\n                _bboxes = (\n                    det_bboxes[:, :4] *\n                    scale_factor if rescale else det_bboxes)\n                mask_rois = bbox2roi([_bboxes])\n                mask_feats = self.mask_roi_extractor(\n                    x[:len(self.mask_roi_extractor.featmap_strides)],\n                    mask_rois)\n\n                if self.with_shared_head:\n                    mask_feats = self.shared_head(mask_feats)\n                if mask_test_cfg and \\\n                        mask_test_cfg.get('async_sleep_interval'):\n                    sleep_interval = mask_test_cfg['async_sleep_interval']\n                else:\n                    sleep_interval = 0.035\n                async with completed(\n                        __name__,\n                        'mask_head_forward',\n                        sleep_interval=sleep_interval):\n                    mask_pred = self.mask_head(mask_feats)\n                segm_result = self.mask_head.get_results(\n                    mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n                    scale_factor, rescale)\n            return segm_result\n\n    # TODO: Currently not supported\n    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n        \"\"\"Test for mask head with test time augmentation.\"\"\"\n        if det_bboxes.shape[0] == 0:\n            segm_result = [[] for _ in range(self.mask_head.num_classes)]\n        else:\n            aug_masks = []\n            for x, img_meta in zip(feats, img_metas):\n                img_shape = img_meta[0]['img_shape']\n                scale_factor = img_meta[0]['scale_factor']\n                flip = img_meta[0]['flip']\n                flip_direction = img_meta[0]['flip_direction']\n                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                       scale_factor, flip, flip_direction)\n                mask_rois = bbox2roi([_bboxes])\n                mask_results = self._mask_forward(x, mask_rois)\n                # convert to numpy array to save memory\n                aug_masks.append(\n               
     mask_results['mask_pred'].sigmoid().cpu().numpy())\n            merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n            ori_shape = img_metas[0][0]['ori_shape']\n            scale_factor = det_bboxes.new_ones(4)\n            segm_result = self.mask_head.get_results(\n                merged_masks,\n                det_bboxes,\n                det_labels,\n                self.test_cfg,\n                ori_shape,\n                scale_factor=scale_factor,\n                rescale=False)\n        return segm_result\n"
  },
  {
    "path": "mmdet/models/roi_heads/trident_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch\nfrom mmcv.ops import batched_nms\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import InstanceList\nfrom .standard_roi_head import StandardRoIHead\n\n\n@MODELS.register_module()\nclass TridentRoIHead(StandardRoIHead):\n    \"\"\"Trident roi head.\n\n    Args:\n        num_branch (int): Number of branches in TridentNet.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n    \"\"\"\n\n    def __init__(self, num_branch: int, test_branch_idx: int,\n                 **kwargs) -> None:\n        self.num_branch = num_branch\n        self.test_branch_idx = test_branch_idx\n        super().__init__(**kwargs)\n\n    def merge_trident_bboxes(self,\n                             trident_results: InstanceList) -> InstanceData:\n        \"\"\"Merge bbox predictions of each branch.\n\n        Args:\n            trident_results (List[:obj:`InstanceData`]): A list of InstanceData\n                predicted from every branch.\n\n        Returns:\n            :obj:`InstanceData`: merged InstanceData.\n        \"\"\"\n        bboxes = torch.cat([res.bboxes for res in trident_results])\n        scores = torch.cat([res.scores for res in trident_results])\n        labels = torch.cat([res.labels for res in trident_results])\n\n        nms_cfg = self.test_cfg['nms']\n        results = InstanceData()\n        if bboxes.numel() == 0:\n            results.bboxes = bboxes\n            results.scores = scores\n            results.labels = labels\n        else:\n            det_bboxes, keep = batched_nms(bboxes, scores, labels, nms_cfg)\n            results.bboxes = det_bboxes[:, :-1]\n            results.scores = det_bboxes[:, -1]\n            results.labels = labels[keep]\n\n        if self.test_cfg['max_per_img'] > 0:\n            results = results[:self.test_cfg['max_per_img']]\n        return results\n\n    def predict(self,\n                x: Tuple[Tensor],\n                rpn_results_list: InstanceList,\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the roi head and predict detection\n        results on the features of the upstream network.\n\n        - Compute prediction bbox and label per branch.\n        - Merge predictions of each branch according to scores of\n          bboxes, i.e., bboxes with higher score are kept to give\n          top-k prediction.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (N, C, H, W).\n            rpn_results_list (list[:obj:`InstanceData`]): list of region\n                proposals.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results to\n                the original image. 
Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        results_list = super().predict(\n            x=x,\n            rpn_results_list=rpn_results_list,\n            batch_data_samples=batch_data_samples,\n            rescale=rescale)\n\n        num_branch = self.num_branch \\\n            if self.training or self.test_branch_idx == -1 else 1\n\n        merged_results_list = []\n        for i in range(len(batch_data_samples) // num_branch):\n            merged_results_list.append(\n                self.merge_trident_bboxes(results_list[i * num_branch:(i + 1) *\n                                                       num_branch]))\n        return merged_results_list\n"
  },
  {
    "path": "mmdet/models/seg_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .panoptic_fpn_head import PanopticFPNHead  # noqa: F401,F403\nfrom .panoptic_fusion_heads import *  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/models/seg_heads/base_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Dict, List, Tuple, Union\n\nimport torch.nn.functional as F\nfrom mmengine.model import BaseModule\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass BaseSemanticHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base module of Semantic Head.\n\n    Args:\n        num_classes (int): the number of classes.\n        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,\n            which equals to ``1 / output_strides``. The output_strides is\n            for ``seg_preds``. Defaults to  1 / 4.\n        init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization\n            config.\n        loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic\n            head.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 seg_rescale_factor: float = 1 / 4.,\n                 loss_seg: ConfigType = dict(\n                     type='CrossEntropyLoss',\n                     ignore_index=255,\n                     loss_weight=1.0),\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.loss_seg = MODELS.build(loss_seg)\n        self.num_classes = num_classes\n        self.seg_rescale_factor = seg_rescale_factor\n\n    @abstractmethod\n    def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:\n        \"\"\"Placeholder of forward function.\n\n        Args:\n            x (Tensor): Feature maps.\n\n        Returns:\n            Dict[str, Tensor]: A dictionary, including features\n                and predicted scores. Required keys: 'seg_preds'\n                and 'feats'.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def loss(self, x: Union[Tensor, Tuple[Tensor]],\n             batch_data_samples: SampleList) -> Dict[str, Tensor]:\n        \"\"\"\n        Args:\n            x (Union[Tensor, Tuple[Tensor]]): Feature maps.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. 
It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            Dict[str, Tensor]: The loss of semantic head.\n        \"\"\"\n        pass\n\n    def predict(self,\n                x: Union[Tensor, Tuple[Tensor]],\n                batch_img_metas: List[dict],\n                rescale: bool = False) -> List[Tensor]:\n        \"\"\"Test without Augmentation.\n\n        Args:\n            x (Union[Tensor, Tuple[Tensor]]): Feature maps.\n            batch_img_metas (List[dict]): List of image information.\n            rescale (bool): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[Tensor]: semantic segmentation logits.\n        \"\"\"\n        seg_preds = self.forward(x)['seg_preds']\n        seg_preds = F.interpolate(\n            seg_preds,\n            size=batch_img_metas[0]['batch_input_shape'],\n            mode='bilinear',\n            align_corners=False)\n        seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]\n\n        if rescale:\n            seg_pred_list = []\n            for i in range(len(batch_img_metas)):\n                h, w = batch_img_metas[i]['img_shape']\n                seg_pred = seg_preds[i][:, :h, :w]\n\n                h, w = batch_img_metas[i]['ori_shape']\n                seg_pred = F.interpolate(\n                    seg_pred[None],\n                    size=(h, w),\n                    mode='bilinear',\n                    align_corners=False)[0]\n                seg_pred_list.append(seg_pred)\n        else:\n            seg_pred_list = seg_preds\n\n        return seg_pred_list\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.model import ModuleList\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\nfrom ..layers import ConvUpsample\nfrom ..utils import interpolate_as\nfrom .base_semantic_head import BaseSemanticHead\n\n\n@MODELS.register_module()\nclass PanopticFPNHead(BaseSemanticHead):\n    \"\"\"PanopticFPNHead used in Panoptic FPN.\n\n    In this head, the number of output channels is ``num_stuff_classes\n    + 1``, including all stuff classes and one thing class. The stuff\n    classes will be reset from ``0`` to ``num_stuff_classes - 1``, the\n    thing classes will be merged to ``num_stuff_classes``-th channel.\n\n    Arg:\n        num_things_classes (int): Number of thing classes. Default: 80.\n        num_stuff_classes (int): Number of stuff classes. Default: 53.\n        in_channels (int): Number of channels in the input feature\n            map.\n        inner_channels (int): Number of channels in inner features.\n        start_level (int): The start level of the input features\n            used in PanopticFPN.\n        end_level (int): The end level of the used features, the\n            ``end_level``-th layer will not be used.\n        conv_cfg (Optional[Union[ConfigDict, dict]]): Dictionary to construct\n            and config conv layer.\n        norm_cfg (Union[ConfigDict, dict]): Dictionary to construct and config\n            norm layer. Use ``GN`` by default.\n        init_cfg (Optional[Union[ConfigDict, dict]]): Initialization config\n            dict.\n        loss_seg (Union[ConfigDict, dict]): the loss of the semantic head.\n    \"\"\"\n\n    def __init__(self,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 in_channels: int = 256,\n                 inner_channels: int = 128,\n                 start_level: int = 0,\n                 end_level: int = 4,\n                 conv_cfg: OptConfigType = None,\n                 norm_cfg: ConfigType = dict(\n                     type='GN', num_groups=32, requires_grad=True),\n                 loss_seg: ConfigType = dict(\n                     type='CrossEntropyLoss', ignore_index=-1,\n                     loss_weight=1.0),\n                 init_cfg: OptMultiConfig = None) -> None:\n        seg_rescale_factor = 1 / 2**(start_level + 2)\n        super().__init__(\n            num_classes=num_stuff_classes + 1,\n            seg_rescale_factor=seg_rescale_factor,\n            loss_seg=loss_seg,\n            init_cfg=init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        # Used feature layers are [start_level, end_level)\n        self.start_level = start_level\n        self.end_level = end_level\n        self.num_stages = end_level - start_level\n        self.inner_channels = inner_channels\n\n        self.conv_upsample_layers = ModuleList()\n        for i in range(start_level, end_level):\n            self.conv_upsample_layers.append(\n                ConvUpsample(\n                    in_channels,\n                    inner_channels,\n                    num_layers=i if i > 0 else 1,\n                    num_upsample=i if i > 0 else 0,\n                    conv_cfg=conv_cfg,\n                    
norm_cfg=norm_cfg,\n                ))\n        self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)\n\n    def _set_things_to_void(self, gt_semantic_seg: Tensor) -> Tensor:\n        \"\"\"Merge thing classes to one class.\n\n        In PanopticFPN, the background labels will be reset from `0` to\n        `self.num_stuff_classes-1`, the foreground labels will be merged to\n        `self.num_stuff_classes`-th channel.\n        \"\"\"\n        gt_semantic_seg = gt_semantic_seg.int()\n        fg_mask = gt_semantic_seg < self.num_things_classes\n        bg_mask = (gt_semantic_seg >= self.num_things_classes) * (\n            gt_semantic_seg < self.num_things_classes + self.num_stuff_classes)\n\n        new_gt_seg = torch.clone(gt_semantic_seg)\n        new_gt_seg = torch.where(bg_mask,\n                                 gt_semantic_seg - self.num_things_classes,\n                                 new_gt_seg)\n        new_gt_seg = torch.where(fg_mask,\n                                 fg_mask.int() * self.num_stuff_classes,\n                                 new_gt_seg)\n        return new_gt_seg\n\n    def loss(self, x: Union[Tensor, Tuple[Tensor]],\n             batch_data_samples: SampleList) -> Dict[str, Tensor]:\n        \"\"\"\n        Args:\n            x (Union[Tensor, Tuple[Tensor]]): Feature maps.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            Dict[str, Tensor]: The loss of semantic head.\n        \"\"\"\n        seg_preds = self(x)['seg_preds']\n        gt_semantic_segs = [\n            data_sample.gt_sem_seg.sem_seg\n            for data_sample in batch_data_samples\n        ]\n\n        gt_semantic_segs = torch.stack(gt_semantic_segs)\n        if self.seg_rescale_factor != 1.0:\n            gt_semantic_segs = F.interpolate(\n                gt_semantic_segs.float(),\n                scale_factor=self.seg_rescale_factor,\n                mode='nearest').squeeze(1)\n\n        # Things classes will be merged to one class in PanopticFPN.\n        gt_semantic_segs = self._set_things_to_void(gt_semantic_segs)\n\n        if seg_preds.shape[-2:] != gt_semantic_segs.shape[-2:]:\n            seg_preds = interpolate_as(seg_preds, gt_semantic_segs)\n        seg_preds = seg_preds.permute((0, 2, 3, 1))\n\n        loss_seg = self.loss_seg(\n            seg_preds.reshape(-1, self.num_classes),  # => [NxHxW, C]\n            gt_semantic_segs.reshape(-1).long())\n\n        return dict(loss_seg=loss_seg)\n\n    def init_weights(self) -> None:\n        \"\"\"Initialize weights.\"\"\"\n        super().init_weights()\n        nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)\n        self.conv_logits.bias.data.zero_()\n\n    def forward(self, x: Tuple[Tensor]) -> Dict[str, Tensor]:\n        \"\"\"Forward.\n\n        Args:\n            x (Tuple[Tensor]): Multi scale Feature maps.\n\n        Returns:\n            dict[str, Tensor]: semantic segmentation predictions and\n                feature maps.\n        \"\"\"\n        # the number of subnets must be not more than\n        # the length of features.\n        assert self.num_stages <= len(x)\n\n        feats = []\n        for i, layer in enumerate(self.conv_upsample_layers):\n            f = layer(x[self.start_level + i])\n            feats.append(f)\n\n        seg_feats = torch.sum(torch.stack(feats, dim=0), dim=0)\n        seg_preds = 
self.conv_logits(seg_feats)\n        out = dict(seg_preds=seg_preds, seg_feats=seg_feats)\n        return out\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_panoptic_fusion_head import \\\n    BasePanopticFusionHead  # noqa: F401,F403\nfrom .heuristic_fusion_head import HeuristicFusionHead  # noqa: F401,F403\nfrom .maskformer_fusion_head import MaskFormerFusionHead  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for panoptic heads.\"\"\"\n\n    def __init__(self,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 test_cfg: OptConfigType = None,\n                 loss_panoptic: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        super().__init__(init_cfg=init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = num_things_classes + num_stuff_classes\n        self.test_cfg = test_cfg\n\n        if loss_panoptic:\n            self.loss_panoptic = MODELS.build(loss_panoptic)\n        else:\n            self.loss_panoptic = None\n\n    @property\n    def with_loss(self) -> bool:\n        \"\"\"bool: whether the panoptic head contains loss function.\"\"\"\n        return self.loss_panoptic is not None\n\n    @abstractmethod\n    def loss(self, **kwargs):\n        \"\"\"Loss function.\"\"\"\n\n    @abstractmethod\n    def predict(self, **kwargs):\n        \"\"\"Predict function.\"\"\"\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nfrom mmengine.structures import InstanceData, PixelData\nfrom torch import Tensor\n\nfrom mmdet.evaluation.functional import INSTANCE_OFFSET\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import InstanceList, OptConfigType, OptMultiConfig, PixelList\nfrom .base_panoptic_fusion_head import BasePanopticFusionHead\n\n\n@MODELS.register_module()\nclass HeuristicFusionHead(BasePanopticFusionHead):\n    \"\"\"Fusion Head with Heuristic method.\"\"\"\n\n    def __init__(self,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        super().__init__(\n            num_things_classes=num_things_classes,\n            num_stuff_classes=num_stuff_classes,\n            test_cfg=test_cfg,\n            loss_panoptic=None,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def loss(self, **kwargs) -> dict:\n        \"\"\"HeuristicFusionHead has no training loss.\"\"\"\n        return dict()\n\n    def _lay_masks(self,\n                   mask_results: InstanceData,\n                   overlap_thr: float = 0.5) -> Tensor:\n        \"\"\"Lay instance masks to a result map.\n\n        Args:\n            mask_results (:obj:`InstanceData`): Instance segmentation results,\n                each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.\n            overlap_thr (float): Threshold to determine whether two masks\n                overlap. default: 0.5.\n\n        Returns:\n            Tensor: The result map, (H, W).\n        \"\"\"\n        bboxes = mask_results.bboxes\n        scores = mask_results.scores\n        labels = mask_results.labels\n        masks = mask_results.masks\n\n        num_insts = bboxes.shape[0]\n        id_map = torch.zeros(\n            masks.shape[-2:], device=bboxes.device, dtype=torch.long)\n        if num_insts == 0:\n            return id_map, labels\n\n        # Sort by score to use heuristic fusion\n        order = torch.argsort(-scores)\n        bboxes = bboxes[order]\n        labels = labels[order]\n        segm_masks = masks[order]\n\n        instance_id = 1\n        left_labels = []\n        for idx in range(bboxes.shape[0]):\n            _cls = labels[idx]\n            _mask = segm_masks[idx]\n            instance_id_map = torch.ones_like(\n                _mask, dtype=torch.long) * instance_id\n            area = _mask.sum()\n            if area == 0:\n                continue\n\n            pasted = id_map > 0\n            intersect = (_mask * pasted).sum()\n            if (intersect / (area + 1e-5)) > overlap_thr:\n                continue\n\n            _part = _mask * (~pasted)\n            id_map = torch.where(_part, instance_id_map, id_map)\n            left_labels.append(_cls)\n            instance_id += 1\n\n        if len(left_labels) > 0:\n            instance_labels = torch.stack(left_labels)\n        else:\n            instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)\n        assert instance_id == (len(instance_labels) + 1)\n        return id_map, instance_labels\n\n    def _predict_single(self, mask_results: InstanceData, seg_preds: Tensor,\n                        **kwargs) -> PixelData:\n        \"\"\"Fuse the results of instance and semantic segmentations.\n\n        Args:\n            mask_results (:obj:`InstanceData`): Instance 
segmentation results,\n                each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.\n            seg_preds (Tensor): The semantic segmentation results,\n                (num_stuff + 1, H, W).\n\n        Returns:\n            Tensor: The panoptic segmentation result, (H, W).\n        \"\"\"\n        id_map, labels = self._lay_masks(mask_results,\n                                         self.test_cfg.mask_overlap)\n\n        seg_results = seg_preds.argmax(dim=0)\n        seg_results = seg_results + self.num_things_classes\n\n        pan_results = seg_results\n        instance_id = 1\n        for idx in range(len(mask_results)):\n            _mask = id_map == (idx + 1)\n            if _mask.sum() == 0:\n                continue\n            _cls = labels[idx]\n            # simply trust detection\n            segment_id = _cls + instance_id * INSTANCE_OFFSET\n            pan_results[_mask] = segment_id\n            instance_id += 1\n\n        ids, counts = torch.unique(\n            pan_results % INSTANCE_OFFSET, return_counts=True)\n        stuff_ids = ids[ids >= self.num_things_classes]\n        stuff_counts = counts[ids >= self.num_things_classes]\n        ignore_stuff_ids = stuff_ids[\n            stuff_counts < self.test_cfg.stuff_area_limit]\n\n        assert pan_results.ndim == 2\n        pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(\n            1, 1, -1)).any(dim=2)] = self.num_classes\n\n        pan_results = PixelData(sem_seg=pan_results[None].int())\n        return pan_results\n\n    def predict(self, mask_results_list: InstanceList,\n                seg_preds_list: List[Tensor], **kwargs) -> PixelList:\n        \"\"\"Predict results by fusing the results of instance and semantic\n        segmentations.\n\n        Args:\n            mask_results_list (list[:obj:`InstanceData`]): Instance\n                segmentation results, each contains ``bboxes``, ``labels``,\n                ``scores`` and ``masks``.\n            seg_preds_list (Tensor): List of semantic segmentation results.\n\n        Returns:\n            List[PixelData]: Panoptic segmentation result.\n        \"\"\"\n        results_list = [\n            self._predict_single(mask_results_list[i], seg_preds_list[i])\n            for i in range(len(mask_results_list))\n        ]\n\n        return results_list\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData, PixelData\nfrom torch import Tensor\n\nfrom mmdet.evaluation.functional import INSTANCE_OFFSET\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.mask import mask2bbox\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .base_panoptic_fusion_head import BasePanopticFusionHead\n\n\n@MODELS.register_module()\nclass MaskFormerFusionHead(BasePanopticFusionHead):\n    \"\"\"MaskFormer fusion head which postprocesses results for panoptic\n    segmentation, instance segmentation and semantic segmentation.\"\"\"\n\n    def __init__(self,\n                 num_things_classes: int = 80,\n                 num_stuff_classes: int = 53,\n                 test_cfg: OptConfigType = None,\n                 loss_panoptic: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs):\n        super().__init__(\n            num_things_classes=num_things_classes,\n            num_stuff_classes=num_stuff_classes,\n            test_cfg=test_cfg,\n            loss_panoptic=loss_panoptic,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def loss(self, **kwargs):\n        \"\"\"MaskFormerFusionHead has no training loss.\"\"\"\n        return dict()\n\n    def panoptic_postprocess(self, mask_cls: Tensor,\n                             mask_pred: Tensor) -> PixelData:\n        \"\"\"Panoptic segmengation inference.\n\n        Args:\n            mask_cls (Tensor): Classfication outputs of shape\n                (num_queries, cls_out_channels) for a image.\n                Note `cls_out_channels` should includes\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for a image.\n\n        Returns:\n            :obj:`PixelData`: Panoptic segment result of shape \\\n                (h, w), each element in Tensor means: \\\n                ``segment_id = _cls + instance_id * INSTANCE_OFFSET``.\n        \"\"\"\n        object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8)\n        iou_thr = self.test_cfg.get('iou_thr', 0.8)\n        filter_low_score = self.test_cfg.get('filter_low_score', False)\n\n        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)\n        mask_pred = mask_pred.sigmoid()\n\n        keep = labels.ne(self.num_classes) & (scores > object_mask_thr)\n        cur_scores = scores[keep]\n        cur_classes = labels[keep]\n        cur_masks = mask_pred[keep]\n\n        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks\n\n        h, w = cur_masks.shape[-2:]\n        panoptic_seg = torch.full((h, w),\n                                  self.num_classes,\n                                  dtype=torch.int32,\n                                  device=cur_masks.device)\n        if cur_masks.shape[0] == 0:\n            # We didn't detect any mask :(\n            pass\n        else:\n            cur_mask_ids = cur_prob_masks.argmax(0)\n            instance_id = 1\n            for k in range(cur_classes.shape[0]):\n                pred_class = int(cur_classes[k].item())\n                isthing = pred_class < self.num_things_classes\n                mask = cur_mask_ids == k\n                mask_area = mask.sum().item()\n                original_area = (cur_masks[k] >= 0.5).sum().item()\n\n                if filter_low_score:\n                    
mask = mask & (cur_masks[k] >= 0.5)\n\n                if mask_area > 0 and original_area > 0:\n                    if mask_area / original_area < iou_thr:\n                        continue\n\n                    if not isthing:\n                        # different stuff regions of same class will be\n                        # merged here, and stuff share the instance_id 0.\n                        panoptic_seg[mask] = pred_class\n                    else:\n                        panoptic_seg[mask] = (\n                            pred_class + instance_id * INSTANCE_OFFSET)\n                        instance_id += 1\n\n        return PixelData(sem_seg=panoptic_seg[None])\n\n    def semantic_postprocess(self, mask_cls: Tensor,\n                             mask_pred: Tensor) -> PixelData:\n        \"\"\"Semantic segmengation postprocess.\n\n        Args:\n            mask_cls (Tensor): Classfication outputs of shape\n                (num_queries, cls_out_channels) for a image.\n                Note `cls_out_channels` should includes\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for a image.\n\n        Returns:\n            :obj:`PixelData`: Semantic segment result.\n        \"\"\"\n        # TODO add semantic segmentation result\n        raise NotImplementedError\n\n    def instance_postprocess(self, mask_cls: Tensor,\n                             mask_pred: Tensor) -> InstanceData:\n        \"\"\"Instance segmengation postprocess.\n\n        Args:\n            mask_cls (Tensor): Classfication outputs of shape\n                (num_queries, cls_out_channels) for a image.\n                Note `cls_out_channels` should includes\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for a image.\n\n        Returns:\n            :obj:`InstanceData`: Instance segmentation results.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        max_per_image = self.test_cfg.get('max_per_image', 100)\n        num_queries = mask_cls.shape[0]\n        # shape (num_queries, num_class)\n        scores = F.softmax(mask_cls, dim=-1)[:, :-1]\n        # shape (num_queries * num_class, )\n        labels = torch.arange(self.num_classes, device=mask_cls.device).\\\n            unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)\n        scores_per_image, top_indices = scores.flatten(0, 1).topk(\n            max_per_image, sorted=False)\n        labels_per_image = labels[top_indices]\n\n        query_indices = top_indices // self.num_classes\n        mask_pred = mask_pred[query_indices]\n\n        # extract things\n        is_thing = labels_per_image < self.num_things_classes\n        scores_per_image = scores_per_image[is_thing]\n        labels_per_image = labels_per_image[is_thing]\n        mask_pred = mask_pred[is_thing]\n\n        mask_pred_binary = (mask_pred > 0).float()\n        mask_scores_per_image = (mask_pred.sigmoid() *\n                                 mask_pred_binary).flatten(1).sum(1) / (\n                                     mask_pred_binary.flatten(1).sum(1) + 1e-6)\n        
det_scores = scores_per_image * mask_scores_per_image\n        mask_pred_binary = mask_pred_binary.bool()\n        bboxes = mask2bbox(mask_pred_binary)\n\n        results = InstanceData()\n        results.bboxes = bboxes\n        results.labels = labels_per_image\n        results.scores = det_scores\n        results.masks = mask_pred_binary\n        return results\n\n    def predict(self,\n                mask_cls_results: Tensor,\n                mask_pred_results: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = False,\n                **kwargs) -> List[dict]:\n        \"\"\"Test segment without test-time augmentation.\n\n        Only the output of the last decoder layer is used.\n\n        Args:\n            mask_cls_results (Tensor): Mask classification logits,\n                shape (batch_size, num_queries, cls_out_channels).\n                Note `cls_out_channels` should include background.\n            mask_pred_results (Tensor): Mask logits, shape\n                (batch_size, num_queries, h, w).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): If True, return boxes in\n                original image space. Default False.\n\n        Returns:\n            list[dict]: Instance segmentation \\\n                results and panoptic segmentation results for each \\\n                image.\n\n            .. code-block:: none\n\n                [\n                    {\n                        'pan_results': PixelData,\n                        'ins_results': InstanceData,\n                        # semantic segmentation results are not supported yet\n                        'sem_results': PixelData\n                    },\n                    ...\n                ]\n        \"\"\"\n        batch_img_metas = [\n            data_sample.metainfo for data_sample in batch_data_samples\n        ]\n        panoptic_on = self.test_cfg.get('panoptic_on', True)\n        semantic_on = self.test_cfg.get('semantic_on', False)\n        instance_on = self.test_cfg.get('instance_on', False)\n        assert not semantic_on, 'semantic segmentation '\\\n            'results are not supported yet.'\n\n        results = []\n        for mask_cls_result, mask_pred_result, meta in zip(\n                mask_cls_results, mask_pred_results, batch_img_metas):\n            # remove padding\n            img_height, img_width = meta['img_shape'][:2]\n            mask_pred_result = mask_pred_result[:, :img_height, :img_width]\n\n            if rescale:\n                # return result in original resolution\n                ori_height, ori_width = meta['ori_shape'][:2]\n                mask_pred_result = F.interpolate(\n                    mask_pred_result[:, None],\n                    size=(ori_height, ori_width),\n                    mode='bilinear',\n                    align_corners=False)[:, 0]\n\n            result = dict()\n            if panoptic_on:\n                pan_results = self.panoptic_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['pan_results'] = pan_results\n\n            if instance_on:\n                ins_results = self.instance_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['ins_results'] = ins_results\n\n            if semantic_on:\n                sem_results = 
self.semantic_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['sem_results'] = sem_results\n\n            results.append(result)\n\n        return results\n"
  },
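The fusion head above reduces `instance_postprocess` to a flat top-k over all (query, class) score pairs followed by a mask-confidence re-weighting. The snippet below is a minimal, self-contained sketch of just that step with random tensors; `max_per_image` and the tensor sizes are illustrative and not taken from any config in this repo.

```python
# Sketch of the top-k selection and mask-score re-weighting in
# `instance_postprocess` (random inputs; illustrative sizes only).
import torch
import torch.nn.functional as F

num_queries, num_classes, h, w = 100, 80, 32, 32
max_per_image = 10

mask_cls = torch.randn(num_queries, num_classes + 1)  # last channel = background
mask_pred = torch.randn(num_queries, h, w)

scores = F.softmax(mask_cls, dim=-1)[:, :-1]           # drop background column
labels = torch.arange(num_classes).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, top_indices = scores.flatten(0, 1).topk(max_per_image, sorted=False)
labels_per_image = labels[top_indices]
query_indices = top_indices // num_classes              # which query each hit came from
mask_pred = mask_pred[query_indices]

mask_pred_binary = (mask_pred > 0).float()
mask_scores = (mask_pred.sigmoid() * mask_pred_binary).flatten(1).sum(1) / (
    mask_pred_binary.flatten(1).sum(1) + 1e-6)          # mean foreground confidence
det_scores = scores_per_image * mask_scores             # final per-instance scores
print(det_scores.shape, labels_per_image.shape)         # torch.Size([10]) torch.Size([10])
```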
  {
    "path": "mmdet/models/task_modules/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .assigners import *  # noqa: F401,F403\nfrom .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS,\n                      BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS,\n                      PRIOR_GENERATORS, build_anchor_generator, build_assigner,\n                      build_bbox_coder, build_iou_calculator, build_match_cost,\n                      build_prior_generator, build_sampler)\nfrom .coders import *  # noqa: F401,F403\nfrom .prior_generators import *  # noqa: F401,F403\nfrom .samplers import *  # noqa: F401,F403\n\n__all__ = [\n    'ANCHOR_GENERATORS', 'PRIOR_GENERATORS', 'BBOX_ASSIGNERS', 'BBOX_SAMPLERS',\n    'MATCH_COSTS', 'BBOX_CODERS', 'IOU_CALCULATORS', 'build_anchor_generator',\n    'build_prior_generator', 'build_assigner', 'build_sampler',\n    'build_iou_calculator', 'build_match_cost', 'build_bbox_coder'\n]\n"
  },
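A quick usage sketch for the builder helpers re-exported above. The assumption here (not shown in this file) is that the legacy helpers and `TASK_UTILS` build from the same registry in the 3.x layout, so both calls construct the same object; `MaxIoUAssigner` and its `pos_iou_thr`/`neg_iou_thr` arguments come from the assigners package in this repo.

```python
# Hedged sketch: build an assigner from a config dict via the legacy helper
# and via the TASK_UTILS registry (assumed equivalent in the 3.x layout).
from mmdet.models.task_modules import build_assigner
from mmdet.registry import TASK_UTILS

cfg = dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3)

assigner_a = build_assigner(cfg)    # legacy, backward-compatible helper
assigner_b = TASK_UTILS.build(cfg)  # registry used by new-style configs
print(type(assigner_a).__name__, type(assigner_b).__name__)
```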
  {
    "path": "mmdet/models/task_modules/assigners/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .approx_max_iou_assigner import ApproxMaxIoUAssigner\nfrom .assign_result import AssignResult\nfrom .atss_assigner import ATSSAssigner\nfrom .base_assigner import BaseAssigner\nfrom .center_region_assigner import CenterRegionAssigner\nfrom .dynamic_soft_label_assigner import DynamicSoftLabelAssigner\nfrom .grid_assigner import GridAssigner\nfrom .hungarian_assigner import HungarianAssigner\nfrom .iou2d_calculator import BboxOverlaps2D\nfrom .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,\n                         DiceCost, FocalLossCost, IoUCost)\nfrom .max_iou_assigner import MaxIoUAssigner\nfrom .multi_instance_assigner import MultiInstanceAssigner\nfrom .point_assigner import PointAssigner\nfrom .region_assigner import RegionAssigner\nfrom .sim_ota_assigner import SimOTAAssigner\nfrom .task_aligned_assigner import TaskAlignedAssigner\nfrom .uniform_assigner import UniformAssigner\n\n__all__ = [\n    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',\n    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',\n    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',\n    'TaskAlignedAssigner', 'BBoxL1Cost', 'ClassificationCost',\n    'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost', 'IoUCost',\n    'BboxOverlaps2D', 'DynamicSoftLabelAssigner', 'MultiInstanceAssigner'\n]\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/approx_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Union\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom .assign_result import AssignResult\nfrom .max_iou_assigner import MaxIoUAssigner\n\n\n@TASK_UTILS.register_module()\nclass ApproxMaxIoUAssigner(MaxIoUAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with an integer indicating the ground truth\n     index. (semi-positive index: gt label (0-based), -1: background)\n\n    - -1: negative sample, no assigned gt\n    - semi-positive integer: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes.\n        ignore_wrt_candidates (bool): Whether to compute the iof between\n            `bboxes` and `gt_bboxes_ignore`, or the contrary.\n        match_low_quality (bool): Whether to allow quality matches. This is\n            usually allowed for RPN and single stage detectors, but not allowed\n            in the second stage.\n        gpu_assign_thr (int): The upper bound of the number of GT for GPU\n            assign. When the number of gt is above this threshold, will assign\n            on CPU device. 
Negative values mean not assign on CPU.\n        iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps\n            Calculator.\n    \"\"\"\n\n    def __init__(\n        self,\n        pos_iou_thr: float,\n        neg_iou_thr: Union[float, tuple],\n        min_pos_iou: float = .0,\n        gt_max_assign_all: bool = True,\n        ignore_iof_thr: float = -1,\n        ignore_wrt_candidates: bool = True,\n        match_low_quality: bool = True,\n        gpu_assign_thr: int = -1,\n        iou_calculator: Union[ConfigDict, dict] = dict(type='BboxOverlaps2D')\n    ) -> None:\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.ignore_iof_thr = ignore_iof_thr\n        self.ignore_wrt_candidates = ignore_wrt_candidates\n        self.gpu_assign_thr = gpu_assign_thr\n        self.match_low_quality = match_low_quality\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to approxs.\n\n        This method assign a gt bbox to each group of approxs (bboxes),\n        each group of approxs is represent by a base approx (bbox) and\n        will be assigned with -1, or a semi-positive number.\n        background_label (-1) means negative sample,\n        semi-positive number is the index (0-based) of assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to background_label (-1)\n        2. use the max IoU of each group of approxs to assign\n        2. assign proposals whose iou with all gts < neg_iou_thr to background\n        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,\n           assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals (may be more than\n           one) to itself\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). ``approxs`` means the\n                group of approxs aligned with ``priors``, has shape\n                (n, num_approxs, 4).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        squares = pred_instances.priors\n        approxs = pred_instances.approxs\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        gt_bboxes_ignore = None if gt_instances_ignore is None else \\\n            gt_instances_ignore.get('bboxes', None)\n        approxs_per_octave = approxs.size(1)\n\n        num_squares = squares.size(0)\n        num_gts = gt_bboxes.size(0)\n\n        if num_squares == 0 or num_gts == 0:\n            # No predictions and/or truth, return empty assignment\n            overlaps = approxs.new(num_gts, num_squares)\n            assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n            return assign_result\n\n        # re-organize anchors by approxs_per_octave x num_squares\n        approxs = torch.transpose(approxs, 0, 1).contiguous().view(-1, 4)\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            num_gts > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = approxs.device\n            approxs = approxs.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n            if gt_labels is not None:\n                gt_labels = gt_labels.cpu()\n        all_overlaps = self.iou_calculator(approxs, gt_bboxes)\n\n        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,\n                                        num_gts).max(dim=0)\n        overlaps = torch.transpose(overlaps, 0, 1)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):\n            if self.ignore_wrt_candidates:\n                ignore_overlaps = self.iou_calculator(\n                    squares, gt_bboxes_ignore, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            else:\n                ignore_overlaps = self.iou_calculator(\n                    gt_bboxes_ignore, squares, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n"
  },
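The distinctive step in `ApproxMaxIoUAssigner.assign` is collapsing the overlaps of each square's group of approx boxes into a single best overlap per square before the usual max-IoU assignment. Below is a standalone sketch of that reduction, with random IoUs standing in for `self.iou_calculator(approxs, gt_bboxes)`.

```python
# Sketch of the per-octave max-IoU reduction used above (random IoUs).
import torch

approxs_per_octave, num_squares, num_gts = 9, 5, 3
# one row per approx box, ordered octave-major as in the reshaped `approxs`
all_overlaps = torch.rand(approxs_per_octave * num_squares, num_gts)

# keep, for every square, the best IoU any of its approxs reaches per gt
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0)
overlaps = torch.transpose(overlaps, 0, 1)  # (num_gts, num_squares), as assign_wrt_overlaps expects
print(overlaps.shape)  # torch.Size([3, 5])
```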
  {
    "path": "mmdet/models/task_modules/assigners/assign_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.utils import util_mixins\n\n\nclass AssignResult(util_mixins.NiceRepr):\n    \"\"\"Stores assignments between predicted and truth boxes.\n\n    Attributes:\n        num_gts (int): the number of truth boxes considered when computing this\n            assignment\n        gt_inds (Tensor): for each predicted box indicates the 1-based\n            index of the assigned truth box. 0 means unassigned and -1 means\n            ignore.\n        max_overlaps (Tensor): the iou between the predicted box and its\n            assigned truth box.\n        labels (Tensor): If specified, for each predicted box\n            indicates the category label of the assigned truth box.\n\n    Example:\n        >>> # An assign result between 4 predicted boxes and 9 true boxes\n        >>> # where only two boxes were assigned.\n        >>> num_gts = 9\n        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])\n        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])\n        >>> labels = torch.LongTensor([0, 3, 4, 0])\n        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)\n        >>> print(str(self))  # xdoctest: +IGNORE_WANT\n        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),\n                      labels.shape=(4,))>\n        >>> # Force addition of gt labels (when adding gt as proposals)\n        >>> new_labels = torch.LongTensor([3, 4, 5])\n        >>> self.add_gt_(new_labels)\n        >>> print(str(self))  # xdoctest: +IGNORE_WANT\n        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),\n                      labels.shape=(7,))>\n    \"\"\"\n\n    def __init__(self, num_gts: int, gt_inds: Tensor, max_overlaps: Tensor,\n                 labels: Tensor) -> None:\n        self.num_gts = num_gts\n        self.gt_inds = gt_inds\n        self.max_overlaps = max_overlaps\n        self.labels = labels\n        # Interface for possible user-defined properties\n        self._extra_properties = {}\n\n    @property\n    def num_preds(self):\n        \"\"\"int: the number of predictions in this assignment\"\"\"\n        return len(self.gt_inds)\n\n    def set_extra_property(self, key, value):\n        \"\"\"Set user-defined new property.\"\"\"\n        assert key not in self.info\n        self._extra_properties[key] = value\n\n    def get_extra_property(self, key):\n        \"\"\"Get user-defined property.\"\"\"\n        return self._extra_properties.get(key, None)\n\n    @property\n    def info(self):\n        \"\"\"dict: a dictionary of info about the object\"\"\"\n        basic_info = {\n            'num_gts': self.num_gts,\n            'num_preds': self.num_preds,\n            'gt_inds': self.gt_inds,\n            'max_overlaps': self.max_overlaps,\n            'labels': self.labels,\n        }\n        basic_info.update(self._extra_properties)\n        return basic_info\n\n    def __nice__(self):\n        \"\"\"str: a \"nice\" summary string describing this assign result\"\"\"\n        parts = []\n        parts.append(f'num_gts={self.num_gts!r}')\n        if self.gt_inds is None:\n            parts.append(f'gt_inds={self.gt_inds!r}')\n        else:\n            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')\n        if self.max_overlaps is None:\n            parts.append(f'max_overlaps={self.max_overlaps!r}')\n        else:\n            parts.append('max_overlaps.shape='\n                         
f'{tuple(self.max_overlaps.shape)!r}')\n        if self.labels is None:\n            parts.append(f'labels={self.labels!r}')\n        else:\n            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')\n        return ', '.join(parts)\n\n    @classmethod\n    def random(cls, **kwargs):\n        \"\"\"Create random AssignResult for tests or debugging.\n\n        Args:\n            num_preds: number of predicted boxes\n            num_gts: number of true boxes\n            p_ignore (float): probability of a predicted box assigned to an\n                ignored truth\n            p_assigned (float): probability of a predicted box not being\n                assigned\n            p_use_label (float | bool): with labels or not\n            rng (None | int | numpy.random.RandomState): seed or state\n\n        Returns:\n            :obj:`AssignResult`: Randomly generated assign results.\n\n        Example:\n            >>> from mmdet.models.task_modules.assigners.assign_result import *  # NOQA\n            >>> self = AssignResult.random()\n            >>> print(self.info)\n        \"\"\"\n        from ..samplers.sampling_result import ensure_rng\n        rng = ensure_rng(kwargs.get('rng', None))\n\n        num_gts = kwargs.get('num_gts', None)\n        num_preds = kwargs.get('num_preds', None)\n        p_ignore = kwargs.get('p_ignore', 0.3)\n        p_assigned = kwargs.get('p_assigned', 0.7)\n        num_classes = kwargs.get('num_classes', 3)\n\n        if num_gts is None:\n            num_gts = rng.randint(0, 8)\n        if num_preds is None:\n            num_preds = rng.randint(0, 16)\n\n        if num_gts == 0:\n            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)\n            gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n            labels = torch.zeros(num_preds, dtype=torch.int64)\n\n        else:\n            import numpy as np\n\n            # Create an overlap for each predicted box\n            max_overlaps = torch.from_numpy(rng.rand(num_preds))\n\n            # Construct gt_inds for each predicted box\n            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)\n            # maximum number of assignments constraints\n            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))\n\n            assigned_idxs = np.where(is_assigned)[0]\n            rng.shuffle(assigned_idxs)\n            assigned_idxs = assigned_idxs[0:n_assigned]\n            assigned_idxs.sort()\n\n            is_assigned[:] = 0\n            is_assigned[assigned_idxs] = True\n\n            is_ignore = torch.from_numpy(\n                rng.rand(num_preds) < p_ignore) & is_assigned\n\n            gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n\n            true_idxs = np.arange(num_gts)\n            rng.shuffle(true_idxs)\n            true_idxs = torch.from_numpy(true_idxs)\n            gt_inds[is_assigned] = true_idxs[:n_assigned].long()\n\n            gt_inds = torch.from_numpy(\n                rng.randint(1, num_gts + 1, size=num_preds))\n            gt_inds[is_ignore] = -1\n            gt_inds[~is_assigned] = 0\n            max_overlaps[~is_assigned] = 0\n\n            if num_classes == 0:\n                labels = torch.zeros(num_preds, dtype=torch.int64)\n            else:\n                labels = torch.from_numpy(\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    rng.randint(0, num_classes, size=num_preds))\n           
     labels[~is_assigned] = 0\n\n        self = cls(num_gts, gt_inds, max_overlaps, labels)\n        return self\n\n    def add_gt_(self, gt_labels):\n        \"\"\"Add ground truth as assigned results.\n\n        Args:\n            gt_labels (torch.Tensor): Labels of gt boxes\n        \"\"\"\n        self_inds = torch.arange(\n            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)\n        self.gt_inds = torch.cat([self_inds, self.gt_inds])\n\n        self.max_overlaps = torch.cat(\n            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])\n\n        self.labels = torch.cat([gt_labels, self.labels])\n"
  },
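`AssignResult` already carries a doctest-style example; the sketch below additionally exercises `add_gt_`, which prepends each ground-truth box as its own (perfect) assignment. The values are illustrative, and the suggestion that this is used when gt boxes are added as proposals is an assumption about typical usage rather than something shown in this file.

```python
# Toy AssignResult plus add_gt_ (illustrative values).
import torch
from mmdet.models.task_modules.assigners import AssignResult

gt_inds = torch.LongTensor([0, 1, 2, 0])           # 0 = unassigned, k = gt k (1-based)
max_overlaps = torch.Tensor([0.0, 0.6, 0.9, 0.1])
labels = torch.LongTensor([-1, 3, 5, -1])
result = AssignResult(2, gt_inds, max_overlaps, labels)

result.add_gt_(torch.LongTensor([3, 5]))           # gts become predictions 1..num_gts
print(result.num_preds)                            # 6
print(result.gt_inds.tolist())                     # [1, 2, 0, 1, 2, 0]
print(result.labels.tolist())                      # [3, 5, -1, 3, 5, -1]
```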
  {
    "path": "mmdet/models/task_modules/assigners/atss_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef bbox_center_distance(bboxes: Tensor, priors: Tensor) -> Tensor:\n    \"\"\"Compute the center distance between bboxes and priors.\n\n    Args:\n        bboxes (Tensor): Shape (n, 4) for , \"xyxy\" format.\n        priors (Tensor): Shape (n, 4) for priors, \"xyxy\" format.\n\n    Returns:\n        Tensor: Center distances between bboxes and priors.\n    \"\"\"\n    bbox_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0\n    bbox_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0\n    bbox_points = torch.stack((bbox_cx, bbox_cy), dim=1)\n\n    priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0\n    priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0\n    priors_points = torch.stack((priors_cx, priors_cy), dim=1)\n\n    distances = (priors_points[:, None, :] -\n                 bbox_points[None, :, :]).pow(2).sum(-1).sqrt()\n\n    return distances\n\n\n@TASK_UTILS.register_module()\nclass ATSSAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each prior.\n\n    Each proposals will be assigned with `0` or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    If ``alpha`` is not None, it means that the dynamic cost\n    ATSSAssigner is adopted, which is currently only used in the DDOD.\n\n    Args:\n        topk (int): number of priors selected in each level\n        alpha (float, optional): param of cost rate for each proposal only\n            in DDOD. Defaults to None.\n        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou\n            calculator. Defaults to ``dict(type='BboxOverlaps2D')``\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes. Defaults to -1.\n    \"\"\"\n\n    def __init__(self,\n                 topk: int,\n                 alpha: Optional[float] = None,\n                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D'),\n                 ignore_iof_thr: float = -1) -> None:\n        self.topk = topk\n        self.alpha = alpha\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n        self.ignore_iof_thr = ignore_iof_thr\n\n    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py\n    def assign(\n            self,\n            pred_instances: InstanceData,\n            num_level_priors: List[int],\n            gt_instances: InstanceData,\n            gt_instances_ignore: Optional[InstanceData] = None\n    ) -> AssignResult:\n        \"\"\"Assign gt to priors.\n\n        The assignment is done in following steps\n\n        1. compute iou between all prior (prior of all pyramid levels) and gt\n        2. compute center distance between all prior and gt\n        3. on each pyramid level, for each gt, select k prior whose center\n           are closest to the gt center, so we total select k*l prior as\n           candidates for each gt\n        4. get corresponding iou for the these candidates, and compute the\n           mean and std, set mean + std as the iou threshold\n        5. 
select these candidates whose IoU is greater than or equal to\n           the threshold as positive\n        6. limit the positive sample's center in gt\n\n        If ``alpha`` is not None, and ``cls_scores`` and ``bbox_preds``\n        are not None, the overlaps calculation in the first step\n        will also include dynamic cost, which is currently only used in\n        the DDOD.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors, points, or bboxes predicted by the model,\n                shape (n, 4).\n            num_level_priors (List): Number of bboxes in each level\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n        if gt_instances_ignore is not None:\n            gt_bboxes_ignore = gt_instances_ignore.bboxes\n        else:\n            gt_bboxes_ignore = None\n\n        INF = 100000000\n        priors = priors[:, :4]\n        num_gt, num_priors = gt_bboxes.size(0), priors.size(0)\n\n        message = 'Invalid alpha parameter because cls_scores or ' \\\n                  'bbox_preds are None. If you want to use the ' \\\n                  'cost-based ATSSAssigner, please set cls_scores, ' \\\n                  'bbox_preds and self.alpha at the same time. 
'\n\n        # compute iou between all bbox and gt\n        if self.alpha is None:\n            # ATSSAssigner\n            overlaps = self.iou_calculator(priors, gt_bboxes)\n            if ('scores' in pred_instances or 'bboxes' in pred_instances):\n                warnings.warn(message)\n\n        else:\n            # Dynamic cost ATSSAssigner in DDOD\n            assert ('scores' in pred_instances\n                    and 'bboxes' in pred_instances), message\n            cls_scores = pred_instances.scores\n            bbox_preds = pred_instances.bboxes\n\n            # compute cls cost for bbox and GT\n            cls_cost = torch.sigmoid(cls_scores[:, gt_labels])\n\n            # compute iou between all bbox and gt\n            overlaps = self.iou_calculator(bbox_preds, gt_bboxes)\n\n            # make sure that we are in element-wise multiplication\n            assert cls_cost.shape == overlaps.shape\n\n            # overlaps is actually a cost matrix\n            overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha\n\n        # assign 0 by default\n        assigned_gt_inds = overlaps.new_full((num_priors, ),\n                                             0,\n                                             dtype=torch.long)\n\n        if num_gt == 0 or num_priors == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_priors, ))\n            if num_gt == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            assigned_labels = overlaps.new_full((num_priors, ),\n                                                -1,\n                                                dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        # compute center distance between all bbox and gt\n        distances = bbox_center_distance(gt_bboxes, priors)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0):\n            ignore_overlaps = self.iou_calculator(\n                priors, gt_bboxes_ignore, mode='iof')\n            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr\n            distances[ignore_idxs, :] = INF\n            assigned_gt_inds[ignore_idxs] = -1\n\n        # Selecting candidates based on the center distance\n        candidate_idxs = []\n        start_idx = 0\n        for level, priors_per_level in enumerate(num_level_priors):\n            # on each pyramid level, for each gt,\n            # select k bbox whose center are closest to the gt center\n            end_idx = start_idx + priors_per_level\n            distances_per_level = distances[start_idx:end_idx, :]\n            selectable_k = min(self.topk, priors_per_level)\n            _, topk_idxs_per_level = distances_per_level.topk(\n                selectable_k, dim=0, largest=False)\n            candidate_idxs.append(topk_idxs_per_level + start_idx)\n            start_idx = end_idx\n        candidate_idxs = torch.cat(candidate_idxs, dim=0)\n\n        # get corresponding iou for the these candidates, and compute the\n        # mean and std, set mean + std as the iou threshold\n        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]\n        overlaps_mean_per_gt = candidate_overlaps.mean(0)\n        overlaps_std_per_gt = 
candidate_overlaps.std(0)\n        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt\n\n        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]\n\n        # limit the positive sample's center in gt\n        for gt_idx in range(num_gt):\n            candidate_idxs[:, gt_idx] += gt_idx * num_priors\n        priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0\n        priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0\n        ep_priors_cx = priors_cx.view(1, -1).expand(\n            num_gt, num_priors).contiguous().view(-1)\n        ep_priors_cy = priors_cy.view(1, -1).expand(\n            num_gt, num_priors).contiguous().view(-1)\n        candidate_idxs = candidate_idxs.view(-1)\n\n        # calculate the left, top, right, bottom distance between positive\n        # prior center and gt side\n        l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n        t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n        r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt)\n        b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt)\n        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n\n        is_pos = is_pos & is_in_gts\n\n        # if an anchor box is assigned to multiple gts,\n        # the one with the highest IoU will be selected.\n        overlaps_inf = torch.full_like(overlaps,\n                                       -INF).t().contiguous().view(-1)\n        index = candidate_idxs.view(-1)[is_pos.view(-1)]\n        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n        overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n        assigned_gt_inds[\n            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n\n        assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1)\n        pos_inds = torch.nonzero(\n            assigned_gt_inds > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -\n                                                  1]\n        return AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n"
  },
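The heart of ATSS is the adaptive IoU threshold in steps 4-5 of the docstring above: per gt, the candidates selected by center distance are kept only if their IoU is at least mean + std of the candidate IoUs. A self-contained sketch with random numbers in place of the real candidate overlaps:

```python
# Sketch of the ATSS mean + std adaptive threshold (random candidate IoUs).
import torch

num_candidates, num_gt = 27, 4        # e.g. topk=9 candidates over 3 pyramid levels
candidate_overlaps = torch.rand(num_candidates, num_gt)

overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt

is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
print(is_pos.sum(0).tolist())         # number of positive candidates kept per gt
```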
  {
    "path": "mmdet/models/task_modules/assigners/base_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Optional\n\nfrom mmengine.structures import InstanceData\n\n\nclass BaseAssigner(metaclass=ABCMeta):\n    \"\"\"Base assigner that assigns boxes to ground truth boxes.\"\"\"\n\n    @abstractmethod\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs):\n        \"\"\"Assign boxes to either a ground truth boxes or a negative boxes.\"\"\"\n"
  },
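`BaseAssigner` only fixes the `assign` interface. Below is a minimal sketch of how a custom assigner could subclass it and be registered so that it is buildable from a config dict; `RandomAssigner` is a made-up illustration, not part of mmdet.

```python
# Hypothetical custom assigner built on BaseAssigner (illustration only).
from typing import Optional

import torch
from mmengine.structures import InstanceData

from mmdet.models.task_modules.assigners import AssignResult, BaseAssigner
from mmdet.registry import TASK_UTILS


@TASK_UTILS.register_module()
class RandomAssigner(BaseAssigner):
    """Toy assigner: every prior gets a random gt index (1-based) or 0."""

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs) -> AssignResult:
        num_priors = pred_instances.priors.size(0)
        num_gts = gt_instances.bboxes.size(0)
        gt_inds = torch.randint(0, num_gts + 1, (num_priors, ))
        labels = gt_inds.new_full((num_priors, ), -1)
        pos = gt_inds > 0
        labels[pos] = gt_instances.labels[gt_inds[pos] - 1]
        max_overlaps = gt_inds.new_zeros((num_priors, )).float()
        return AssignResult(num_gts, gt_inds, max_overlaps, labels)


# assigner = TASK_UTILS.build(dict(type='RandomAssigner'))
```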
  {
    "path": "mmdet/models/task_modules/assigners/center_region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef scale_boxes(bboxes: Tensor, scale: float) -> Tensor:\n    \"\"\"Expand an array of boxes by a given scale.\n\n    Args:\n        bboxes (Tensor): Shape (m, 4)\n        scale (float): The scale factor of bboxes\n\n    Returns:\n        Tensor: Shape (m, 4). Scaled bboxes\n    \"\"\"\n    assert bboxes.size(1) == 4\n    w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5\n    h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5\n    x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5\n    y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5\n\n    w_half *= scale\n    h_half *= scale\n\n    boxes_scaled = torch.zeros_like(bboxes)\n    boxes_scaled[:, 0] = x_c - w_half\n    boxes_scaled[:, 2] = x_c + w_half\n    boxes_scaled[:, 1] = y_c - h_half\n    boxes_scaled[:, 3] = y_c + h_half\n    return boxes_scaled\n\n\ndef is_located_in(points: Tensor, bboxes: Tensor) -> Tensor:\n    \"\"\"Are points located in bboxes.\n\n    Args:\n        points (Tensor): Points, shape: (m, 2).\n        bboxes (Tensor): Bounding boxes, shape: (n, 4).\n\n    Return:\n        Tensor: Flags indicating if points are located in bboxes,\n        shape: (m, n).\n    \"\"\"\n    assert points.size(1) == 2\n    assert bboxes.size(1) == 4\n    return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \\\n           (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \\\n           (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \\\n           (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))\n\n\ndef bboxes_area(bboxes: Tensor) -> Tensor:\n    \"\"\"Compute the area of an array of bboxes.\n\n    Args:\n        bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4)\n\n    Returns:\n        Tensor: Area of the bboxes. Shape: (m, )\n    \"\"\"\n    assert bboxes.size(1) == 4\n    w = (bboxes[:, 2] - bboxes[:, 0])\n    h = (bboxes[:, 3] - bboxes[:, 1])\n    areas = w * h\n    return areas\n\n\n@TASK_UTILS.register_module()\nclass CenterRegionAssigner(BaseAssigner):\n    \"\"\"Assign pixels at the center region of a bbox as positive.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n    - -1: negative samples\n    - semi-positive numbers: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_scale (float): Threshold within which pixels are\n            labelled as positive.\n        neg_scale (float): Threshold above which pixels are\n            labelled as positive.\n        min_pos_iof (float): Minimum iof of a pixel with a gt to be\n            labelled as positive. Default: 1e-2\n        ignore_gt_scale (float): Threshold within which the pixels\n            are ignored when the gt is labelled as shadowed. Default: 0.5\n        foreground_dominate (bool): If True, the bbox will be assigned as\n            positive when a gt's kernel region overlaps with another's shadowed\n            (ignored) region, otherwise it is set as ignored. 
Default to False.\n        iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps\n            Calculator.\n    \"\"\"\n\n    def __init__(\n        self,\n        pos_scale: float,\n        neg_scale: float,\n        min_pos_iof: float = 1e-2,\n        ignore_gt_scale: float = 0.5,\n        foreground_dominate: bool = False,\n        iou_calculator: ConfigType = dict(type='BboxOverlaps2D')\n    ) -> None:\n        self.pos_scale = pos_scale\n        self.neg_scale = neg_scale\n        self.min_pos_iof = min_pos_iof\n        self.ignore_gt_scale = ignore_gt_scale\n        self.foreground_dominate = foreground_dominate\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def get_gt_priorities(self, gt_bboxes: Tensor) -> Tensor:\n        \"\"\"Get gt priorities according to their areas.\n\n        Smaller gt has higher priority.\n\n        Args:\n            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).\n\n        Returns:\n            Tensor: The priority of gts so that gts with larger priority is\n            more likely to be assigned. Shape (k, )\n        \"\"\"\n        gt_areas = bboxes_area(gt_bboxes)\n        # Rank all gt bbox areas. Smaller objects has larger priority\n        _, sort_idx = gt_areas.sort(descending=True)\n        sort_idx = sort_idx.argsort()\n        return sort_idx\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to bboxes.\n\n        This method assigns gts to every prior (proposal/anchor), each prior\n        will be assigned with -1, or a semi-positive number. -1 means\n        negative sample, semi-positive number is the index (0-based) of\n        assigned gt.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assigned result. Note that shadowed_labels\n            of shape (N, 2) is also added as an `assign_result` attribute.\n            `shadowed_labels` is a tensor composed of N pairs of anchor_ind,\n            class_label], where N is the number of anchors that lie in the\n            outer region of a gt, anchor_ind is the shadowed anchor index\n            and class_label is the shadowed class label.\n\n        Example:\n            >>> from mmengine.structures import InstanceData\n            >>> self = CenterRegionAssigner(0.2, 0.2)\n            >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],\n            ...                                      
[10, 10, 20, 20]])\n            >>> gt_instances = InstanceData()\n            >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 10]])\n            >>> gt_instances.labels = torch.Tensor([0])\n            >>> assign_result = self.assign(pred_instances, gt_instances)\n            >>> expected_gt_inds = torch.LongTensor([1, 0])\n            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)\n        \"\"\"\n        # There are in total 5 steps in the pixel assignment\n        # 1. Find core (the center region, say inner 0.2)\n        #     and shadow (the relatively ourter part, say inner 0.2-0.5)\n        #     regions of every gt.\n        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions\n        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in\n        #      the image.\n        #    3.1. For overlapping objects, the prior bboxes in gt_core is\n        #           assigned with the object with smallest area\n        # 4. Assign prior bboxes with class label according to its gt id.\n        #    4.1. Assign -1 to prior bboxes lying in shadowed gts\n        #    4.2. Assign positive prior boxes with the corresponding label\n        # 5. Find pixels lying in the shadow of an object and assign them with\n        #      background label, but set the loss weight of its corresponding\n        #      gt to zero.\n\n        # TODO not extract bboxes in assign.\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n\n        assert priors.size(1) == 4, 'priors must have size of 4'\n        # 1. Find core positive and shadow region of every gt\n        gt_core = scale_boxes(gt_bboxes, self.pos_scale)\n        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)\n\n        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions\n        prior_centers = (priors[:, 2:4] + priors[:, 0:2]) / 2\n        # The center points lie within the gt boxes\n        is_prior_in_gt = is_located_in(prior_centers, gt_bboxes)\n        # Only calculate prior and gt_core IoF. 
This enables small prior bboxes\n        #   to match large gts\n        prior_and_gt_core_overlaps = self.iou_calculator(\n            priors, gt_core, mode='iof')\n        # The center point of effective priors should be within the gt box\n        is_prior_in_gt_core = is_prior_in_gt & (\n            prior_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)\n\n        is_prior_in_gt_shadow = (\n            self.iou_calculator(priors, gt_shadow, mode='iof') >\n            self.min_pos_iof)\n        # Rule out center effective positive pixels\n        is_prior_in_gt_shadow &= (~is_prior_in_gt_core)\n\n        num_gts, num_priors = gt_bboxes.size(0), priors.size(0)\n        if num_gts == 0 or num_priors == 0:\n            # If no gts exist, assign all pixels to negative\n            assigned_gt_ids = \\\n                is_prior_in_gt_core.new_zeros((num_priors,),\n                                              dtype=torch.long)\n            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))\n        else:\n            # Step 3: assign a one-hot gt id to each pixel, and smaller objects\n            #    have high priority to assign the pixel.\n            sort_idx = self.get_gt_priorities(gt_bboxes)\n            assigned_gt_ids, pixels_in_gt_shadow = \\\n                self.assign_one_hot_gt_indices(is_prior_in_gt_core,\n                                               is_prior_in_gt_shadow,\n                                               gt_priority=sort_idx)\n\n        if (gt_instances_ignore is not None\n                and gt_instances_ignore.bboxes.numel() > 0):\n            # No ground truth or boxes, return empty assignment\n            gt_bboxes_ignore = gt_instances_ignore.bboxes\n            gt_bboxes_ignore = scale_boxes(\n                gt_bboxes_ignore, scale=self.ignore_gt_scale)\n            is_prior_in_ignored_gts = is_located_in(prior_centers,\n                                                    gt_bboxes_ignore)\n            is_prior_in_ignored_gts = is_prior_in_ignored_gts.any(dim=1)\n            assigned_gt_ids[is_prior_in_ignored_gts] = -1\n\n        # 4. Assign prior bboxes with class label according to its gt id.\n        # Default assigned label is the background (-1)\n        assigned_labels = assigned_gt_ids.new_full((num_priors, ), -1)\n        pos_inds = torch.nonzero(assigned_gt_ids > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] -\n                                                  1]\n        # 5. 
Find pixels lying in the shadow of an object\n        shadowed_pixel_labels = pixels_in_gt_shadow.clone()\n        if pixels_in_gt_shadow.numel() > 0:\n            pixel_idx, gt_idx =\\\n                pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]\n            assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \\\n                'Some pixels are dually assigned to ignore and gt!'\n            shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]\n            override = (\n                assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])\n            if self.foreground_dominate:\n                # When a pixel is both positive and shadowed, set it as pos\n                shadowed_pixel_labels = shadowed_pixel_labels[~override]\n            else:\n                # When a pixel is both pos and shadowed, set it as shadowed\n                assigned_labels[pixel_idx[override]] = -1\n                assigned_gt_ids[pixel_idx[override]] = 0\n\n        assign_result = AssignResult(\n            num_gts, assigned_gt_ids, None, labels=assigned_labels)\n        # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)\n        assign_result.set_extra_property('shadowed_labels',\n                                         shadowed_pixel_labels)\n        return assign_result\n\n    def assign_one_hot_gt_indices(\n            self,\n            is_prior_in_gt_core: Tensor,\n            is_prior_in_gt_shadow: Tensor,\n            gt_priority: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:\n        \"\"\"Assign only one gt index to each prior box.\n\n        Gts with large gt_priority are more likely to be assigned.\n\n        Args:\n            is_prior_in_gt_core (Tensor): Bool tensor indicating the prior\n                center is in the core area of a gt (e.g. 0-0.2).\n                Shape: (num_prior, num_gt).\n            is_prior_in_gt_shadow (Tensor): Bool tensor indicating the prior\n                center is in the shadowed area of a gt (e.g. 0.2-0.5).\n                Shape: (num_prior, num_gt).\n            gt_priority (Tensor): Priorities of gts. The gt with a higher\n                priority is more likely to be assigned to the bbox when the\n                bbox match with multiple gts. Shape: (num_gt, ).\n\n        Returns:\n            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).\n\n            - assigned_gt_inds: The assigned gt index of each prior bbox \\\n            (i.e. index from 1 to num_gts). Shape: (num_prior, ).\n            - shadowed_gt_inds: shadowed gt indices. It is a tensor of \\\n            shape (num_ignore, 2) with first column being the shadowed prior \\\n            bbox indices and the second column the shadowed gt \\\n            indices (1-based).\n        \"\"\"\n        num_bboxes, num_gts = is_prior_in_gt_core.shape\n\n        if gt_priority is None:\n            gt_priority = torch.arange(\n                num_gts, device=is_prior_in_gt_core.device)\n        assert gt_priority.size(0) == num_gts\n        # The bigger gt_priority, the more preferable to be assigned\n        # The assigned inds are by default 0 (background)\n        assigned_gt_inds = is_prior_in_gt_core.new_zeros((num_bboxes, ),\n                                                         dtype=torch.long)\n        # Shadowed bboxes are assigned to be background. 
But the corresponding\n        #   label is ignored during loss calculation, which is done through\n        #   shadowed_gt_inds\n        shadowed_gt_inds = torch.nonzero(is_prior_in_gt_shadow, as_tuple=False)\n        if is_prior_in_gt_core.sum() == 0:  # No gt match\n            shadowed_gt_inds[:, 1] += 1  # 1-based. For consistency issue\n            return assigned_gt_inds, shadowed_gt_inds\n\n        # The priority of each prior box and gt pair. If one prior box is\n        #  matched bo multiple gts. Only the pair with the highest priority\n        #  is saved\n        pair_priority = is_prior_in_gt_core.new_full((num_bboxes, num_gts),\n                                                     -1,\n                                                     dtype=torch.long)\n\n        # Each bbox could match with multiple gts.\n        # The following codes deal with this situation\n        # Matched  bboxes (to any gt). Shape: (num_pos_anchor, )\n        inds_of_match = torch.any(is_prior_in_gt_core, dim=1)\n        # The matched gt index of each positive bbox. Length >= num_pos_anchor\n        #   , since one bbox could match multiple gts\n        matched_bbox_gt_inds = torch.nonzero(\n            is_prior_in_gt_core, as_tuple=False)[:, 1]\n        # Assign priority to each bbox-gt pair.\n        pair_priority[is_prior_in_gt_core] = gt_priority[matched_bbox_gt_inds]\n        _, argmax_priority = pair_priority[inds_of_match].max(dim=1)\n        assigned_gt_inds[inds_of_match] = argmax_priority + 1  # 1-based\n        # Zero-out the assigned anchor box to filter the shadowed gt indices\n        is_prior_in_gt_core[inds_of_match, argmax_priority] = 0\n        # Concat the shadowed indices due to overlapping with that out side of\n        #   effective scale. shape: (total_num_ignore, 2)\n        shadowed_gt_inds = torch.cat(\n            (shadowed_gt_inds,\n             torch.nonzero(is_prior_in_gt_core, as_tuple=False)),\n            dim=0)\n        # Change `is_prior_in_gt_core` back to keep arguments intact.\n        is_prior_in_gt_core[inds_of_match, argmax_priority] = 1\n        # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`\n        if shadowed_gt_inds.numel() > 0:\n            shadowed_gt_inds[:, 1] += 1\n        return assigned_gt_inds, shadowed_gt_inds\n"
  },
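The core/shadow geometry in `CenterRegionAssigner` comes from the module-level helpers defined above. A small sketch of how `scale_boxes` and `is_located_in` carve out the inner regions of a gt box; the boxes, points, and scales are illustrative only.

```python
# Sketch of the core/shadow region helpers (illustrative inputs).
import torch

from mmdet.models.task_modules.assigners.center_region_assigner import (
    is_located_in, scale_boxes)

gt_bboxes = torch.Tensor([[0., 0., 10., 10.]])
gt_core = scale_boxes(gt_bboxes, 0.2)     # inner 20% region around the center
gt_shadow = scale_boxes(gt_bboxes, 0.5)   # inner 50% region
print(gt_core.tolist())                   # [[4.0, 4.0, 6.0, 6.0]]

prior_centers = torch.Tensor([[5., 5.], [9., 9.]])
print(is_located_in(prior_centers, gt_core).squeeze(1).tolist())    # [True, False]
print(is_located_in(prior_centers, gt_shadow).squeeze(1).tolist())  # [True, False]
```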
  {
    "path": "mmdet/models/task_modules/assigners/dynamic_soft_label_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import BaseBoxes\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\nINF = 100000000\nEPS = 1.0e-7\n\n\ndef center_of_mass(masks: Tensor, eps: float = 1e-7) -> Tensor:\n    \"\"\"Compute the masks center of mass.\n\n    Args:\n        masks: Mask tensor, has shape (num_masks, H, W).\n        eps: a small number to avoid normalizer to be zero.\n            Defaults to 1e-7.\n    Returns:\n        Tensor: The masks center of mass. Has shape (num_masks, 2).\n    \"\"\"\n    n, h, w = masks.shape\n    grid_h = torch.arange(h, device=masks.device)[:, None]\n    grid_w = torch.arange(w, device=masks.device)\n    normalizer = masks.sum(dim=(1, 2)).float().clamp(min=eps)\n    center_y = (masks * grid_h).sum(dim=(1, 2)) / normalizer\n    center_x = (masks * grid_w).sum(dim=(1, 2)) / normalizer\n    center = torch.cat([center_x[:, None], center_y[:, None]], dim=1)\n    return center\n\n\n@TASK_UTILS.register_module()\nclass DynamicSoftLabelAssigner(BaseAssigner):\n    \"\"\"Computes matching between predictions and ground truth with dynamic soft\n    label assignment.\n\n    Args:\n        soft_center_radius (float): Radius of the soft center prior.\n            Defaults to 3.0.\n        topk (int): Select top-k predictions to calculate dynamic k\n            best matches for each gt. Defaults to 13.\n        iou_weight (float): The scale factor of iou cost. Defaults to 3.0.\n        iou_calculator (ConfigType): Config of overlaps Calculator.\n            Defaults to dict(type='BboxOverlaps2D').\n    \"\"\"\n\n    def __init__(\n        self,\n        soft_center_radius: float = 3.0,\n        topk: int = 13,\n        iou_weight: float = 3.0,\n        iou_calculator: ConfigType = dict(type='BboxOverlaps2D')\n    ) -> None:\n        self.soft_center_radius = soft_center_radius\n        self.topk = topk\n        self.iou_weight = iou_weight\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to priors.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n        Returns:\n            obj:`AssignResult`: The assigned result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        num_gt = gt_bboxes.size(0)\n\n        decoded_bboxes = pred_instances.bboxes\n        pred_scores = pred_instances.scores\n        priors = pred_instances.priors\n        num_bboxes = decoded_bboxes.size(0)\n\n        # assign 0 by default\n        assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ),\n                                                   0,\n                                                   dtype=torch.long)\n        if num_gt == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n            if num_gt == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            assigned_labels = decoded_bboxes.new_full((num_bboxes, ),\n                                                      -1,\n                                                      dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        prior_center = priors[:, :2]\n        if isinstance(gt_bboxes, BaseBoxes):\n            is_in_gts = gt_bboxes.find_inside_points(prior_center)\n        else:\n            # Tensor boxes will be treated as horizontal boxes by defaults\n            lt_ = prior_center[:, None] - gt_bboxes[:, :2]\n            rb_ = gt_bboxes[:, 2:] - prior_center[:, None]\n\n            deltas = torch.cat([lt_, rb_], dim=-1)\n            is_in_gts = deltas.min(dim=-1).values > 0\n\n        valid_mask = is_in_gts.sum(dim=1) > 0\n\n        valid_decoded_bbox = decoded_bboxes[valid_mask]\n        valid_pred_scores = pred_scores[valid_mask]\n        num_valid = valid_decoded_bbox.size(0)\n\n        if num_valid == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n            assigned_labels = decoded_bboxes.new_full((num_bboxes, ),\n                                                      -1,\n                                                      dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n        if hasattr(gt_instances, 'masks'):\n            gt_center = center_of_mass(gt_instances.masks, eps=EPS)\n        elif isinstance(gt_bboxes, BaseBoxes):\n            gt_center = gt_bboxes.centers\n        else:\n            # Tensor boxes will be treated as horizontal boxes by defaults\n            gt_center = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2.0\n        valid_prior = priors[valid_mask]\n        strides = valid_prior[:, 2]\n        distance = (valid_prior[:, None, :2] - gt_center[None, :, :]\n                    ).pow(2).sum(-1).sqrt() / strides[:, None]\n        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)\n\n        pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes)\n        iou_cost = -torch.log(pairwise_ious + EPS) * self.iou_weight\n\n        gt_onehot_label = (\n            F.one_hot(gt_labels.to(torch.int64),\n                      pred_scores.shape[-1]).float().unsqueeze(0).repeat(\n                          num_valid, 1, 1))\n        
valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)\n\n        soft_label = gt_onehot_label * pairwise_ious[..., None]\n        scale_factor = soft_label - valid_pred_scores.sigmoid()\n        soft_cls_cost = F.binary_cross_entropy_with_logits(\n            valid_pred_scores, soft_label,\n            reduction='none') * scale_factor.abs().pow(2.0)\n        soft_cls_cost = soft_cls_cost.sum(dim=-1)\n\n        cost_matrix = soft_cls_cost + iou_cost + soft_center_prior\n\n        matched_pred_ious, matched_gt_inds = self.dynamic_k_matching(\n            cost_matrix, pairwise_ious, num_gt, valid_mask)\n\n        # convert to AssignResult format\n        assigned_gt_inds[valid_mask] = matched_gt_inds + 1\n        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n        assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()\n        max_overlaps = assigned_gt_inds.new_full((num_bboxes, ),\n                                                 -INF,\n                                                 dtype=torch.float32)\n        max_overlaps[valid_mask] = matched_pred_ious\n        return AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n    def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor,\n                           num_gt: int,\n                           valid_mask: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Use IoU and matching cost to calculate the dynamic top-k positive\n        targets. Same as SimOTA.\n\n        Args:\n            cost (Tensor): Cost matrix.\n            pairwise_ious (Tensor): Pairwise iou matrix.\n            num_gt (int): Number of gt.\n            valid_mask (Tensor): Mask for valid bboxes.\n\n        Returns:\n            tuple: matched ious and gt indexes.\n        \"\"\"\n        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)\n        # select candidate topk ious for dynamic-k calculation\n        candidate_topk = min(self.topk, pairwise_ious.size(0))\n        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)\n        # calculate dynamic k for each gt\n        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(\n                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)\n            matching_matrix[:, gt_idx][pos_idx] = 1\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        prior_match_gt_mask = matching_matrix.sum(1) > 1\n        if prior_match_gt_mask.sum() > 0:\n            cost_min, cost_argmin = torch.min(\n                cost[prior_match_gt_mask, :], dim=1)\n            matching_matrix[prior_match_gt_mask, :] *= 0\n            matching_matrix[prior_match_gt_mask, cost_argmin] = 1\n        # get foreground mask inside box and center prior\n        fg_mask_inboxes = matching_matrix.sum(1) > 0\n        valid_mask[valid_mask.clone()] = fg_mask_inboxes\n\n        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)\n        matched_pred_ious = (matching_matrix *\n                             pairwise_ious).sum(1)[fg_mask_inboxes]\n        return matched_pred_ious, matched_gt_inds\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/grid_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@TASK_UTILS.register_module()\nclass GridAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n\n    - -1: don't care\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple[float, float]): IoU threshold for negative\n        bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n            Defaults to 0.\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps\n            Calculator.\n    \"\"\"\n\n    def __init__(\n        self,\n        pos_iou_thr: float,\n        neg_iou_thr: Union[float, Tuple[float, float]],\n        min_pos_iou: float = .0,\n        gt_max_assign_all: bool = True,\n        iou_calculator: ConfigType = dict(type='BboxOverlaps2D')\n    ) -> None:\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to bboxes. The process is very much like the max iou\n        assigner, except that positive samples are constrained within the cell\n        that the gt boxes fell in.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, 0, or a positive number. -1 means don't care,\n        0 means negative sample, positive number is the index (1-based) of\n        assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to -1\n        2. assign proposals whose iou with all gts <= neg_iou_thr to 0\n        3. for each bbox within a cell, if the iou with its nearest gt >\n            pos_iou_thr and the center of that gt falls inside the cell,\n            assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals within the cell the\n            gt bbox falls in to itself.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). 
The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n\n        priors = pred_instances.priors\n        responsible_flags = pred_instances.responsible_flags\n\n        num_gts, num_priors = gt_bboxes.size(0), priors.size(0)\n\n        # compute iou between all gt and priors\n        overlaps = self.iou_calculator(gt_bboxes, priors)\n\n        # 1. assign -1 by default\n        assigned_gt_inds = overlaps.new_full((num_priors, ),\n                                             -1,\n                                             dtype=torch.long)\n\n        if num_gts == 0 or num_priors == 0:\n            # No ground truth or priors, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_priors, ))\n            if num_gts == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            assigned_labels = overlaps.new_full((num_priors, ),\n                                                -1,\n                                                dtype=torch.long)\n            return AssignResult(\n                num_gts,\n                assigned_gt_inds,\n                max_overlaps,\n                labels=assigned_labels)\n\n        # 2. assign negative: below\n        # for each anchor, which gt best overlaps with it\n        # for each anchor, the max iou of all gts\n        # shape of max_overlaps == argmax_overlaps == num_priors\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n\n        if isinstance(self.neg_iou_thr, float):\n            assigned_gt_inds[(max_overlaps >= 0)\n                             & (max_overlaps <= self.neg_iou_thr)] = 0\n        elif isinstance(self.neg_iou_thr, (tuple, list)):\n            assert len(self.neg_iou_thr) == 2\n            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])\n                             & (max_overlaps <= self.neg_iou_thr[1])] = 0\n\n        # 3. assign positive: falls into responsible cell and above\n        # positive IOU threshold, the order matters.\n        # the prior condition of comparison is to filter out all\n        # unrelated anchors, i.e. 
not responsible_flags\n        overlaps[:, ~responsible_flags.type(torch.bool)] = -1.\n\n        # calculate max_overlaps again, but this time we only consider IOUs\n        # for anchors responsible for prediction\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n\n        # for each gt, which anchor best overlaps with it\n        # for each gt, the max iou of all proposals\n        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts\n        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n        pos_inds = (max_overlaps > self.pos_iou_thr) & responsible_flags.type(\n            torch.bool)\n        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1\n\n        # 4. assign positive to max overlapped anchors within responsible cell\n        for i in range(num_gts):\n            if gt_max_overlaps[i] > self.min_pos_iou:\n                if self.gt_max_assign_all:\n                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \\\n                         responsible_flags.type(torch.bool)\n                    assigned_gt_inds[max_iou_inds] = i + 1\n                elif responsible_flags[gt_argmax_overlaps[i]]:\n                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1\n\n        # assign labels of positive anchors\n        assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1)\n        pos_inds = torch.nonzero(\n            assigned_gt_inds > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -\n                                                  1]\n\n        return AssignResult(\n            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/hungarian_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Union\n\nimport torch\nfrom mmengine import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@TASK_UTILS.register_module()\nclass HungarianAssigner(BaseAssigner):\n    \"\"\"Computes one-to-one matching between predictions and ground truth.\n\n    This class computes an assignment between the targets and the predictions\n    based on the costs. The costs are weighted sum of some components.\n    For DETR the costs are weighted sum of classification cost, regression L1\n    cost and regression iou cost. The targets don't include the no_object, so\n    generally there are more predictions than targets. After the one-to-one\n    matching, the un-matched are treated as backgrounds. Thus each query\n    prediction will be assigned with `0` or a positive integer indicating the\n    ground truth index:\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        match_costs (:obj:`ConfigDict` or dict or \\\n            List[Union[:obj:`ConfigDict`, dict]]): Match cost configs.\n    \"\"\"\n\n    def __init__(\n        self, match_costs: Union[List[Union[dict, ConfigDict]], dict,\n                                 ConfigDict]\n    ) -> None:\n\n        if isinstance(match_costs, dict):\n            match_costs = [match_costs]\n        elif isinstance(match_costs, list):\n            assert len(match_costs) > 0, \\\n                'match_costs must not be a empty list.'\n\n        self.match_costs = [\n            TASK_UTILS.build(match_cost) for match_cost in match_costs\n        ]\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               img_meta: Optional[dict] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Computes one-to-one matching based on the weighted costs.\n\n        This method assign each query prediction to a ground truth or\n        background. The `assigned_gt_inds` with -1 means don't care,\n        0 means negative sample, and positive number is the index (1-based)\n        of assigned gt.\n        The assignment is done in the following steps, the order matters.\n\n        1. assign every prediction to -1\n        2. compute the weighted costs\n        3. do Hungarian matching on CPU based on the costs\n        4. assign all to 0 (background) first, then for each matched pair\n           between predictions and gts, treat this prediction as foreground\n           and assign the corresponding gt index (plus 1) to it.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places. It may includes ``masks``, with shape\n                (n, h, w) or (n, l).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It usually includes ``bboxes``, with shape (k, 4),\n                ``labels``, with shape (k, ) and ``masks``, with shape\n                (k, h, w) or (k, l).\n            img_meta (dict): Image information.\n\n        Returns:\n            :obj:`AssignResult`: The assigned result.\n        \"\"\"\n        assert isinstance(gt_instances.labels, Tensor)\n        num_gts, num_preds = len(gt_instances), len(pred_instances)\n        gt_labels = gt_instances.labels\n        device = gt_labels.device\n\n        # 1. assign -1 by default\n        assigned_gt_inds = torch.full((num_preds, ),\n                                      -1,\n                                      dtype=torch.long,\n                                      device=device)\n        assigned_labels = torch.full((num_preds, ),\n                                     -1,\n                                     dtype=torch.long,\n                                     device=device)\n\n        if num_gts == 0 or num_preds == 0:\n            # No ground truth or boxes, return empty assignment\n            if num_gts == 0:\n                # No ground truth, assign all to background\n                assigned_gt_inds[:] = 0\n            return AssignResult(\n                num_gts=num_gts,\n                gt_inds=assigned_gt_inds,\n                max_overlaps=None,\n                labels=assigned_labels)\n\n        # 2. compute weighted cost\n        cost_list = []\n        for match_cost in self.match_costs:\n            cost = match_cost(\n                pred_instances=pred_instances,\n                gt_instances=gt_instances,\n                img_meta=img_meta)\n            cost_list.append(cost)\n        cost = torch.stack(cost_list).sum(dim=0)\n\n        # 3. do Hungarian matching on CPU using linear_sum_assignment\n        cost = cost.detach().cpu()\n        if linear_sum_assignment is None:\n            raise ImportError('Please run \"pip install scipy\" '\n                              'to install scipy first.')\n\n        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n        matched_row_inds = torch.from_numpy(matched_row_inds).to(device)\n        matched_col_inds = torch.from_numpy(matched_col_inds).to(device)\n\n        # 4. assign backgrounds and foregrounds\n        # assign all indices to backgrounds first\n        assigned_gt_inds[:] = 0\n        # assign foregrounds based on matching results\n        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n        return AssignResult(\n            num_gts=num_gts,\n            gt_inds=assigned_gt_inds,\n            max_overlaps=None,\n            labels=assigned_labels)\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/iou2d_calculator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import bbox_overlaps, get_box_tensor\n\n\ndef cast_tensor_type(x, scale=1., dtype=None):\n    if dtype == 'fp16':\n        # scale is for preventing overflows\n        x = (x / scale).half()\n    return x\n\n\n@TASK_UTILS.register_module()\nclass BboxOverlaps2D:\n    \"\"\"2D Overlaps (e.g. IoUs, GIoUs) Calculator.\"\"\"\n\n    def __init__(self, scale=1., dtype=None):\n        self.scale = scale\n        self.dtype = dtype\n\n    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):\n        \"\"\"Calculate IoU between 2D bboxes.\n\n        Args:\n            bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n                in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,\n                y2, score> format.\n            bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n                in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,\n                score> format, or be empty. If ``is_aligned `` is ``True``,\n                then m and n must be equal.\n            mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n                over foreground), or \"giou\" (generalized intersection over\n                union).\n            is_aligned (bool, optional): If True, then m and n must be equal.\n                Default False.\n\n        Returns:\n            Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n        \"\"\"\n        bboxes1 = get_box_tensor(bboxes1)\n        bboxes2 = get_box_tensor(bboxes2)\n        assert bboxes1.size(-1) in [0, 4, 5]\n        assert bboxes2.size(-1) in [0, 4, 5]\n        if bboxes2.size(-1) == 5:\n            bboxes2 = bboxes2[..., :4]\n        if bboxes1.size(-1) == 5:\n            bboxes1 = bboxes1[..., :4]\n\n        if self.dtype == 'fp16':\n            # change tensor type to save cpu and cuda memory and keep speed\n            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)\n            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)\n            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n            if not overlaps.is_cuda and overlaps.dtype == torch.float16:\n                # resume cpu float32\n                overlaps = overlaps.float()\n            return overlaps\n\n        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n\n    def __repr__(self):\n        \"\"\"str: a string describing the module\"\"\"\n        repr_str = self.__class__.__name__ + f'(' \\\n            f'scale={self.scale}, dtype={self.dtype})'\n        return repr_str\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/match_cost.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\nfrom typing import Optional, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import bbox_overlaps, bbox_xyxy_to_cxcywh\n\n\nclass BaseMatchCost:\n    \"\"\"Base match cost class.\n\n    Args:\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n    \"\"\"\n\n    def __init__(self, weight: Union[float, int] = 1.) -> None:\n        self.weight = weight\n\n    @abstractmethod\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            img_meta (dict, optional): Image information.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pass\n\n\n@TASK_UTILS.register_module()\nclass BBoxL1Cost(BaseMatchCost):\n    \"\"\"BBoxL1Cost.\n\n    Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy'\n    and its coordinates are unnormalized.\n\n    Args:\n        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN.\n            Defaults to 'xyxy'.\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n\n    Examples:\n        >>> from mmdet.models.task_modules.assigners.\n        ... match_costs.match_cost import BBoxL1Cost\n        >>> import torch\n        >>> self = BBoxL1Cost()\n        >>> bbox_pred = torch.rand(1, 4)\n        >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])\n        >>> factor = torch.tensor([10, 8, 10, 8])\n        >>> self(bbox_pred, gt_bboxes, factor)\n        tensor([[1.6172, 1.6422]])\n    \"\"\"\n\n    def __init__(self,\n                 box_format: str = 'xyxy',\n                 weight: Union[float, int] = 1.) -> None:\n        super().__init__(weight=weight)\n        assert box_format in ['xyxy', 'xywh']\n        self.box_format = box_format\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): ``bboxes`` inside is\n                predicted boxes with unnormalized coordinate\n                (x, y, x, y).\n            gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt\n                bboxes with unnormalized coordinate (x, y, x, y).\n            img_meta (Optional[dict]): Image information. 
Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pred_bboxes = pred_instances.bboxes\n        gt_bboxes = gt_instances.bboxes\n\n        # convert box format\n        if self.box_format == 'xywh':\n            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)\n            pred_bboxes = bbox_xyxy_to_cxcywh(pred_bboxes)\n\n        # normalized\n        img_h, img_w = img_meta['img_shape']\n        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,\n                                       img_h]).unsqueeze(0)\n        gt_bboxes = gt_bboxes / factor\n        pred_bboxes = pred_bboxes / factor\n\n        bbox_cost = torch.cdist(pred_bboxes, gt_bboxes, p=1)\n        return bbox_cost * self.weight\n\n\n@TASK_UTILS.register_module()\nclass IoUCost(BaseMatchCost):\n    \"\"\"IoUCost.\n\n    Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy'\n    and its coordinates are unnormalized.\n\n    Args:\n        iou_mode (str): iou mode such as 'iou', 'giou'. Defaults to 'giou'.\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n\n    Examples:\n        >>> from mmdet.models.task_modules.assigners.\n        ... match_costs.match_cost import IoUCost\n        >>> import torch\n        >>> self = IoUCost()\n        >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])\n        >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])\n        >>> self(bboxes, gt_bboxes)\n        tensor([[-0.1250,  0.1667],\n            [ 0.1667, -0.5000]])\n    \"\"\"\n\n    def __init__(self, iou_mode: str = 'giou', weight: Union[float, int] = 1.):\n        super().__init__(weight=weight)\n        self.iou_mode = iou_mode\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs):\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): ``bboxes`` inside is\n                predicted boxes with unnormalized coordinate\n                (x, y, x, y).\n            gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt\n                bboxes with unnormalized coordinate (x, y, x, y).\n            img_meta (Optional[dict]): Image information. Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pred_bboxes = pred_instances.bboxes\n        gt_bboxes = gt_instances.bboxes\n\n        overlaps = bbox_overlaps(\n            pred_bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)\n        # The 1 is a constant that doesn't change the matching, so omitted.\n        iou_cost = -overlaps\n        return iou_cost * self.weight\n\n\n@TASK_UTILS.register_module()\nclass ClassificationCost(BaseMatchCost):\n    \"\"\"ClsSoftmaxCost.\n\n    Args:\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n\n    Examples:\n        >>> from mmdet.models.task_modules.assigners.\n        ...  
match_costs.match_cost import ClassificationCost\n        >>> import torch\n        >>> self = ClassificationCost()\n        >>> cls_pred = torch.rand(4, 3)\n        >>> gt_labels = torch.tensor([0, 1, 2])\n        >>> factor = torch.tensor([10, 8, 10, 8])\n        >>> self(cls_pred, gt_labels)\n        tensor([[-0.3430, -0.3525, -0.3045],\n            [-0.3077, -0.2931, -0.3992],\n            [-0.3664, -0.3455, -0.2881],\n            [-0.3343, -0.2701, -0.3956]])\n    \"\"\"\n\n    def __init__(self, weight: Union[float, int] = 1) -> None:\n        super().__init__(weight=weight)\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): ``scores`` inside is\n                predicted classification logits, of shape\n                (num_queries, num_class).\n            gt_instances (:obj:`InstanceData`): ``labels`` inside should have\n                shape (num_gt, ).\n            img_meta (Optional[dict]): Image information. Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pred_scores = pred_instances.scores\n        gt_labels = gt_instances.labels\n\n        pred_scores = pred_scores.softmax(-1)\n        cls_cost = -pred_scores[:, gt_labels]\n\n        return cls_cost * self.weight\n\n\n@TASK_UTILS.register_module()\nclass FocalLossCost(BaseMatchCost):\n    \"\"\"FocalLossCost.\n\n    Args:\n        alpha (Union[float, int]): focal_loss alpha. Defaults to 0.25.\n        gamma (Union[float, int]): focal_loss gamma. Defaults to 2.\n        eps (float): Defaults to 1e-12.\n        binary_input (bool): Whether the input is binary. Currently,\n            binary_input = True is for masks input, binary_input = False\n            is for label input. Defaults to False.\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 alpha: Union[float, int] = 0.25,\n                 gamma: Union[float, int] = 2,\n                 eps: float = 1e-12,\n                 binary_input: bool = False,\n                 weight: Union[float, int] = 1.) 
-> None:\n        super().__init__(weight=weight)\n        self.alpha = alpha\n        self.gamma = gamma\n        self.eps = eps\n        self.binary_input = binary_input\n\n    def _focal_loss_cost(self, cls_pred: Tensor, gt_labels: Tensor) -> Tensor:\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits, shape\n                (num_queries, num_class).\n            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n        Returns:\n            torch.Tensor: cls_cost value with weight\n        \"\"\"\n        cls_pred = cls_pred.sigmoid()\n        neg_cost = -(1 - cls_pred + self.eps).log() * (\n            1 - self.alpha) * cls_pred.pow(self.gamma)\n        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (\n            1 - cls_pred).pow(self.gamma)\n\n        cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]\n        return cls_cost * self.weight\n\n    def _mask_focal_loss_cost(self, cls_pred, gt_labels) -> Tensor:\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits.\n                in shape (num_queries, d1, ..., dn), dtype=torch.float32.\n            gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn),\n                dtype=torch.long. Labels should be binary.\n\n        Returns:\n            Tensor: Focal cost matrix with weight in shape\\\n                (num_queries, num_gt).\n        \"\"\"\n        cls_pred = cls_pred.flatten(1)\n        gt_labels = gt_labels.flatten(1).float()\n        n = cls_pred.shape[1]\n        cls_pred = cls_pred.sigmoid()\n        neg_cost = -(1 - cls_pred + self.eps).log() * (\n            1 - self.alpha) * cls_pred.pow(self.gamma)\n        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (\n            1 - cls_pred).pow(self.gamma)\n\n        cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \\\n            torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels))\n        return cls_cost / n * self.weight\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Predicted instances which\n                must contain ``scores`` or ``masks``.\n            gt_instances (:obj:`InstanceData`): Ground truth which must contain\n                ``labels`` or ``mask``.\n            img_meta (Optional[dict]): Image information. Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        if self.binary_input:\n            pred_masks = pred_instances.masks\n            gt_masks = gt_instances.masks\n            return self._mask_focal_loss_cost(pred_masks, gt_masks)\n        else:\n            pred_scores = pred_instances.scores\n            gt_labels = gt_instances.labels\n            return self._focal_loss_cost(pred_scores, gt_labels)\n\n\n@TASK_UTILS.register_module()\nclass DiceCost(BaseMatchCost):\n    \"\"\"Cost of mask assignments based on dice losses.\n\n    Args:\n        pred_act (bool): Whether to apply sigmoid to mask_pred.\n            Defaults to False.\n        eps (float): Defaults to 1e-3.\n        naive_dice (bool): If True, use the naive dice loss\n            in which the power of the number in the denominator is\n            the first power. 
If False, use the second power that\n            is adopted by K-Net and SOLO. Defaults to True.\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 pred_act: bool = False,\n                 eps: float = 1e-3,\n                 naive_dice: bool = True,\n                 weight: Union[float, int] = 1.) -> None:\n        super().__init__(weight=weight)\n        self.pred_act = pred_act\n        self.eps = eps\n        self.naive_dice = naive_dice\n\n    def _binary_mask_dice_loss(self, mask_preds: Tensor,\n                               gt_masks: Tensor) -> Tensor:\n        \"\"\"\n        Args:\n            mask_preds (Tensor): Mask prediction in shape (num_queries, *).\n            gt_masks (Tensor): Ground truth in shape (num_gt, *)\n                store 0 or 1, 0 for negative class and 1 for\n                positive class.\n\n        Returns:\n            Tensor: Dice cost matrix in shape (num_queries, num_gt).\n        \"\"\"\n        mask_preds = mask_preds.flatten(1)\n        gt_masks = gt_masks.flatten(1).float()\n        numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)\n        if self.naive_dice:\n            denominator = mask_preds.sum(-1)[:, None] + \\\n                gt_masks.sum(-1)[None, :]\n        else:\n            denominator = mask_preds.pow(2).sum(1)[:, None] + \\\n                gt_masks.pow(2).sum(1)[None, :]\n        loss = 1 - (numerator + self.eps) / (denominator + self.eps)\n        return loss\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Predicted instances which\n                must contain ``masks``.\n            gt_instances (:obj:`InstanceData`): Ground truth which must contain\n                ``masks``.\n            img_meta (Optional[dict]): Image information. Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pred_masks = pred_instances.masks\n        gt_masks = gt_instances.masks\n\n        if self.pred_act:\n            pred_masks = pred_masks.sigmoid()\n        dice_cost = self._binary_mask_dice_loss(pred_masks, gt_masks)\n        return dice_cost * self.weight\n\n\n@TASK_UTILS.register_module()\nclass CrossEntropyLossCost(BaseMatchCost):\n    \"\"\"CrossEntropyLossCost.\n\n    Args:\n        use_sigmoid (bool): Whether the prediction uses sigmoid\n            or softmax. Defaults to True.\n        weight (Union[float, int]): Cost weight. Defaults to 1.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid: bool = True,\n                 weight: Union[float, int] = 1.) 
-> None:\n        super().__init__(weight=weight)\n        self.use_sigmoid = use_sigmoid\n\n    def _binary_cross_entropy(self, cls_pred: Tensor,\n                              gt_labels: Tensor) -> Tensor:\n        \"\"\"\n        Args:\n            cls_pred (Tensor): The prediction with shape (num_queries, 1, *) or\n                (num_queries, *).\n            gt_labels (Tensor): The learning label of prediction with\n                shape (num_gt, *).\n\n        Returns:\n            Tensor: Cross entropy cost matrix in shape (num_queries, num_gt).\n        \"\"\"\n        cls_pred = cls_pred.flatten(1).float()\n        gt_labels = gt_labels.flatten(1).float()\n        n = cls_pred.shape[1]\n        pos = F.binary_cross_entropy_with_logits(\n            cls_pred, torch.ones_like(cls_pred), reduction='none')\n        neg = F.binary_cross_entropy_with_logits(\n            cls_pred, torch.zeros_like(cls_pred), reduction='none')\n        cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \\\n            torch.einsum('nc,mc->nm', neg, 1 - gt_labels)\n        cls_cost = cls_cost / n\n\n        return cls_cost\n\n    def __call__(self,\n                 pred_instances: InstanceData,\n                 gt_instances: InstanceData,\n                 img_meta: Optional[dict] = None,\n                 **kwargs) -> Tensor:\n        \"\"\"Compute match cost.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Predicted instances which\n                must contain ``scores`` or ``masks``.\n            gt_instances (:obj:`InstanceData`): Ground truth which must contain\n                ``labels`` or ``masks``.\n            img_meta (Optional[dict]): Image information. Defaults to None.\n\n        Returns:\n            Tensor: Match Cost matrix of shape (num_preds, num_gts).\n        \"\"\"\n        pred_masks = pred_instances.masks\n        gt_masks = gt_instances.masks\n        if self.use_sigmoid:\n            cls_cost = self._binary_cross_entropy(pred_masks, gt_masks)\n        else:\n            raise NotImplementedError\n\n        return cls_cost * self.weight\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Union\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@TASK_UTILS.register_module()\nclass MaxIoUAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, or a semi-positive integer\n    indicating the ground truth index.\n\n    - -1: negative sample, no assigned gt\n    - semi-positive integer: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n            `min_pos_iou` is set to avoid assigning bboxes that have extremely\n            small iou with GT as positive samples. It brings about 0.3 mAP\n            improvements in 1x schedule but does not affect the performance of\n            3x schedule. More comparisons can be found in\n            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes.\n        ignore_wrt_candidates (bool): Whether to compute the iof between\n            `bboxes` and `gt_bboxes_ignore`, or the contrary.\n        match_low_quality (bool): Whether to allow low quality matches. This is\n            usually allowed for RPN and single stage detectors, but not allowed\n            in the second stage. Details are demonstrated in Step 4.\n        gpu_assign_thr (int): The upper bound of the number of GT for GPU\n            assign. When the number of gt is above this threshold, will assign\n            on CPU device. 
Negative values mean not assign on CPU.\n        iou_calculator (dict): Config of overlaps Calculator.\n    \"\"\"\n\n    def __init__(self,\n                 pos_iou_thr: float,\n                 neg_iou_thr: Union[float, tuple],\n                 min_pos_iou: float = .0,\n                 gt_max_assign_all: bool = True,\n                 ignore_iof_thr: float = -1,\n                 ignore_wrt_candidates: bool = True,\n                 match_low_quality: bool = True,\n                 gpu_assign_thr: float = -1,\n                 iou_calculator: dict = dict(type='BboxOverlaps2D')):\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.ignore_iof_thr = ignore_iof_thr\n        self.ignore_wrt_candidates = ignore_wrt_candidates\n        self.gpu_assign_thr = gpu_assign_thr\n        self.match_low_quality = match_low_quality\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to bboxes.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, or a semi-positive number. -1 means negative\n        sample, semi-positive number is the index (0-based) of assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to the background\n        2. assign proposals whose iou with all gts < neg_iou_thr to 0\n        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,\n           assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals (may be more than\n           one) to itself\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n\n        Example:\n            >>> from mmengine.structures import InstanceData\n            >>> self = MaxIoUAssigner(0.5, 0.5)\n            >>> pred_instances = InstanceData()\n            >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],\n            ...                                      
[10, 10, 20, 20]])\n            >>> gt_instances = InstanceData()\n            >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])\n            >>> gt_instances.labels = torch.Tensor([0])\n            >>> assign_result = self.assign(pred_instances, gt_instances)\n            >>> expected_gt_inds = torch.LongTensor([1, 0])\n            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n        if gt_instances_ignore is not None:\n            gt_bboxes_ignore = gt_instances_ignore.bboxes\n        else:\n            gt_bboxes_ignore = None\n\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            gt_bboxes.shape[0] > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = priors.device\n            priors = priors.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            gt_labels = gt_labels.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n\n        overlaps = self.iou_calculator(gt_bboxes, priors)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0):\n            if self.ignore_wrt_candidates:\n                ignore_overlaps = self.iou_calculator(\n                    priors, gt_bboxes_ignore, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            else:\n                ignore_overlaps = self.iou_calculator(\n                    gt_bboxes_ignore, priors, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n\n    def assign_wrt_overlaps(self, overlaps: Tensor,\n                            gt_labels: Tensor) -> AssignResult:\n        \"\"\"Assign w.r.t. the overlaps of priors with gts.\n\n        Args:\n            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,\n                shape(k, n).\n            gt_labels (Tensor): Labels of k gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)\n\n        # 1. 
assign -1 by default\n        assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_bboxes, ))\n            assigned_labels = overlaps.new_full((num_bboxes, ),\n                                                -1,\n                                                dtype=torch.long)\n            if num_gts == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            return AssignResult(\n                num_gts=num_gts,\n                gt_inds=assigned_gt_inds,\n                max_overlaps=max_overlaps,\n                labels=assigned_labels)\n\n        # for each anchor, which gt best overlaps with it\n        # for each anchor, the max iou of all gts\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n        # for each gt, which anchor best overlaps with it\n        # for each gt, the max iou of all proposals\n        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n        # 2. assign negative: below\n        # the negative inds are set to be 0\n        if isinstance(self.neg_iou_thr, float):\n            assigned_gt_inds[(max_overlaps >= 0)\n                             & (max_overlaps < self.neg_iou_thr)] = 0\n        elif isinstance(self.neg_iou_thr, tuple):\n            assert len(self.neg_iou_thr) == 2\n            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])\n                             & (max_overlaps < self.neg_iou_thr[1])] = 0\n\n        # 3. assign positive: above positive IoU threshold\n        pos_inds = max_overlaps >= self.pos_iou_thr\n        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1\n\n        if self.match_low_quality:\n            # Low-quality matching will overwrite the assigned_gt_inds assigned\n            # in Step 3. Thus, the assigned gt might not be the best one for\n            # prediction.\n            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,\n            # bbox 1 will be assigned as the best target for bbox A in step 3.\n            # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's\n            # assigned_gt_inds will be overwritten to be bbox 2.\n            # This might be the reason that it is not used in ROI Heads.\n            for i in range(num_gts):\n                if gt_max_overlaps[i] >= self.min_pos_iou:\n                    if self.gt_max_assign_all:\n                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]\n                        assigned_gt_inds[max_iou_inds] = i + 1\n                    else:\n                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1\n\n        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n        pos_inds = torch.nonzero(\n            assigned_gt_inds > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -\n                                                  1]\n\n        return AssignResult(\n            num_gts=num_gts,\n            gt_inds=assigned_gt_inds,\n            max_overlaps=max_overlaps,\n            labels=assigned_labels)\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/multi_instance_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom .assign_result import AssignResult\nfrom .max_iou_assigner import MaxIoUAssigner\n\n\n@TASK_UTILS.register_module()\nclass MultiInstanceAssigner(MaxIoUAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each proposal bbox. If\n    we need to use a proposal box to generate multiple predict boxes,\n    `MultiInstanceAssigner` can assign multiple gt to each proposal box.\n\n    Args:\n        num_instance (int): How many bboxes are predicted by each proposal box.\n    \"\"\"\n\n    def __init__(self, num_instance: int = 2, **kwargs):\n        super().__init__(**kwargs)\n        self.num_instance = num_instance\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to bboxes.\n\n        This method assign gt bboxes to every bbox (proposal/anchor), each bbox\n        is assigned a set of gts, and the number of gts in this set is defined\n        by `self.num_instance`.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        # Set the FG label to 1 and add ignored annotations\n        gt_labels = gt_instances.labels + 1\n        if gt_instances_ignore is not None:\n            gt_bboxes_ignore = gt_instances_ignore.bboxes\n            if hasattr(gt_instances_ignore, 'labels'):\n                gt_labels_ignore = gt_instances_ignore.labels\n            else:\n                gt_labels_ignore = torch.ones_like(gt_bboxes_ignore)[:, 0] * -1\n        else:\n            gt_bboxes_ignore = None\n            gt_labels_ignore = None\n\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            gt_bboxes.shape[0] > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = priors.device\n            priors = priors.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            gt_labels = gt_labels.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n                gt_labels_ignore = gt_labels_ignore.cpu()\n\n        if gt_bboxes_ignore is not None:\n            all_bboxes = torch.cat([gt_bboxes, gt_bboxes_ignore], dim=0)\n            all_labels = torch.cat([gt_labels, gt_labels_ignore], dim=0)\n        else:\n            all_bboxes = gt_bboxes\n            all_labels = gt_labels\n        all_priors = torch.cat([priors, all_bboxes], dim=0)\n\n        overlaps_normal = self.iou_calculator(\n            all_priors, all_bboxes, mode='iou')\n        overlaps_ignore = self.iou_calculator(\n            all_priors, all_bboxes, mode='iof')\n        gt_ignore_mask = all_labels.eq(-1).repeat(all_priors.shape[0], 1)\n        overlaps_normal = overlaps_normal * ~gt_ignore_mask\n        overlaps_ignore = overlaps_ignore * gt_ignore_mask\n\n        overlaps_normal, overlaps_normal_indices = overlaps_normal.sort(\n            descending=True, dim=1)\n        overlaps_ignore, overlaps_ignore_indices = overlaps_ignore.sort(\n            descending=True, dim=1)\n\n        # select the roi with the higher score\n        max_overlaps_normal = overlaps_normal[:, :self.num_instance].flatten()\n        gt_assignment_normal = overlaps_normal_indices[:, :self.\n                                                       num_instance].flatten()\n        max_overlaps_ignore = overlaps_ignore[:, :self.num_instance].flatten()\n        gt_assignment_ignore = overlaps_ignore_indices[:, :self.\n                                                       num_instance].flatten()\n\n        # ignore or not\n        ignore_assign_mask = (max_overlaps_normal < self.pos_iou_thr) * (\n            max_overlaps_ignore > max_overlaps_normal)\n        overlaps = (max_overlaps_normal * ~ignore_assign_mask) + (\n            max_overlaps_ignore * ignore_assign_mask)\n        gt_assignment = (gt_assignment_normal * ~ignore_assign_mask) + (\n            gt_assignment_ignore * ignore_assign_mask)\n\n        assigned_labels = all_labels[gt_assignment]\n        fg_mask = (overlaps >= self.pos_iou_thr) * (assigned_labels != -1)\n        bg_mask = (overlaps < self.neg_iou_thr) * (overlaps >= 0)\n        assigned_labels[fg_mask] = 1\n        assigned_labels[bg_mask] = 0\n\n        overlaps = 
overlaps.reshape(-1, self.num_instance)\n        gt_assignment = gt_assignment.reshape(-1, self.num_instance)\n        assigned_labels = assigned_labels.reshape(-1, self.num_instance)\n\n        assign_result = AssignResult(\n            num_gts=all_bboxes.size(0),\n            gt_inds=gt_assignment,\n            max_overlaps=overlaps,\n            labels=assigned_labels)\n\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n"
  },
  {
    "path": "mmdet/models/task_modules/assigners/point_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@TASK_UTILS.register_module()\nclass PointAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each point.\n\n    Each proposals will be assigned with `0`, or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n    \"\"\"\n\n    def __init__(self, scale: int = 4, pos_num: int = 3) -> None:\n        self.scale = scale\n        self.pos_num = pos_num\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to points.\n\n        This method assign a gt bbox to every points set, each points set\n        will be assigned with  the background_label (-1), or a label number.\n        -1 is background, and semi-positive number is the index (0-based) of\n        assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every points to the background_label (-1)\n        2. A point is assigned to some gt bbox if\n            (i) the point is within the k closest points to the gt bbox\n            (ii) the distance between this point and the gt is smaller than\n                other gt bboxes\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n\n\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        # points to be assigned, shape(n, 3) while last\n        # dimension stands for (x, y, stride).\n        points = pred_instances.priors\n\n        num_points = points.shape[0]\n        num_gts = gt_bboxes.shape[0]\n\n        if num_gts == 0 or num_points == 0:\n            # If no truth assign everything to the background\n            assigned_gt_inds = points.new_full((num_points, ),\n                                               0,\n                                               dtype=torch.long)\n            assigned_labels = points.new_full((num_points, ),\n                                              -1,\n                                              dtype=torch.long)\n            return AssignResult(\n                num_gts=num_gts,\n                gt_inds=assigned_gt_inds,\n                max_overlaps=None,\n                labels=assigned_labels)\n\n        points_xy = points[:, :2]\n        points_stride = points[:, 2]\n        points_lvl = torch.log2(\n            points_stride).int()  # [3...,4...,5...,6...,7...]\n        lvl_min, lvl_max = points_lvl.min(), points_lvl.max()\n\n        # assign gt box\n        gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2\n        gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)\n        scale = self.scale\n        gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +\n                          torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()\n        gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)\n\n        # stores the assigned gt index of each point\n        assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)\n        # stores the assigned gt dist (to this point) of each point\n        assigned_gt_dist = points.new_full((num_points, ), float('inf'))\n        points_range = torch.arange(points.shape[0])\n\n        for idx in range(num_gts):\n            gt_lvl = gt_bboxes_lvl[idx]\n            # get the index of points in this level\n            lvl_idx = gt_lvl == points_lvl\n            points_index = points_range[lvl_idx]\n            # get the points in this level\n            lvl_points = points_xy[lvl_idx, :]\n            # get the center point of gt\n            gt_point = gt_bboxes_xy[[idx], :]\n            # get width and height of gt\n            gt_wh = gt_bboxes_wh[[idx], :]\n            # compute the distance between gt center and\n            #   all points in this level\n            points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)\n            # find the nearest k points to gt center in this level\n            min_dist, min_dist_index = torch.topk(\n                points_gt_dist, self.pos_num, largest=False)\n            # the index of nearest k points to gt center in this level\n            min_dist_points_index = points_index[min_dist_index]\n            # The less_than_recorded_index stores the index\n            #   of min_dist that is less then the assigned_gt_dist. 
Where\n            #   assigned_gt_dist stores the dist from previous assigned gt\n            #   (if exist) to each point.\n            less_than_recorded_index = min_dist < assigned_gt_dist[\n                min_dist_points_index]\n            # The min_dist_points_index stores the index of points satisfy:\n            #   (1) it is k nearest to current gt center in this level.\n            #   (2) it is closer to current gt center than other gt center.\n            min_dist_points_index = min_dist_points_index[\n                less_than_recorded_index]\n            # assign the result\n            assigned_gt_inds[min_dist_points_index] = idx + 1\n            assigned_gt_dist[min_dist_points_index] = min_dist[\n                less_than_recorded_index]\n\n        assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)\n        pos_inds = torch.nonzero(\n            assigned_gt_inds > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -\n                                                  1]\n\n        return AssignResult(\n            num_gts=num_gts,\n            gt_inds=assigned_gt_inds,\n            max_overlaps=None,\n            labels=assigned_labels)\n"
  },
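A minimal usage sketch of the point assignment above, with toy tensors that are not part of the repo. It assumes `PointAssigner` is exported from `mmdet.models.task_modules.assigners` and that at least `pos_num` priors exist at the gt's pyramid level (otherwise the internal `topk` would fail):

```python
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import PointAssigner

# Toy priors in (x, y, stride) form; all at stride 8, i.e. level log2(8) = 3.
priors = torch.tensor([[4., 4., 8.],
                       [12., 4., 8.],
                       [4., 12., 8.],
                       [20., 20., 8.]])
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(
    bboxes=torch.tensor([[0., 0., 32., 32.]]),  # one 32x32 gt box
    labels=torch.tensor([0]))

assigner = PointAssigner(scale=4, pos_num=3)
result = assigner.assign(pred_instances, gt_instances)
print(result.gt_inds)  # 1 for the 3 priors nearest the gt center, 0 otherwise
print(result.labels)   # gt label (0) for positives, -1 for negatives
```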
  {
    "path": "mmdet/models/task_modules/assigners/region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom ..prior_generators import anchor_inside_flags\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef calc_region(\n        bbox: Tensor,\n        ratio: float,\n        stride: int,\n        featmap_size: Optional[Tuple[int, int]] = None) -> Tuple[Tensor]:\n    \"\"\"Calculate region of the box defined by the ratio, the ratio is from the\n    center of the box to every edge.\"\"\"\n    # project bbox on the feature\n    f_bbox = bbox / stride\n    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])\n    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])\n    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])\n    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])\n    if featmap_size is not None:\n        x1 = x1.clamp(min=0, max=featmap_size[1])\n        y1 = y1.clamp(min=0, max=featmap_size[0])\n        x2 = x2.clamp(min=0, max=featmap_size[1])\n        y2 = y2.clamp(min=0, max=featmap_size[0])\n    return (x1, y1, x2, y2)\n\n\ndef anchor_ctr_inside_region_flags(anchors: Tensor, stride: int,\n                                   region: Tuple[Tensor]) -> Tensor:\n    \"\"\"Get the flag indicate whether anchor centers are inside regions.\"\"\"\n    x1, y1, x2, y2 = region\n    f_anchors = anchors / stride\n    x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5\n    y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5\n    flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)\n    return flags\n\n\n@TASK_UTILS.register_module()\nclass RegionAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n\n    - -1: don't care\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        center_ratio (float): ratio of the region in the center of the bbox to\n            define positive sample.\n        ignore_ratio (float): ratio of the region to define ignore samples.\n    \"\"\"\n\n    def __init__(self,\n                 center_ratio: float = 0.2,\n                 ignore_ratio: float = 0.5) -> None:\n        self.center_ratio = center_ratio\n        self.ignore_ratio = ignore_ratio\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               img_meta: dict,\n               featmap_sizes: List[Tuple[int, int]],\n               num_level_anchors: List[int],\n               anchor_scale: int,\n               anchor_strides: List[int],\n               gt_instances_ignore: Optional[InstanceData] = None,\n               allowed_border: int = 0) -> AssignResult:\n        \"\"\"Assign gt to anchors.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, 0, or a positive number. -1 means don't care,\n        0 means negative sample, positive number is the index (1-based) of\n        assigned gt.\n\n        The assignment is done in following steps, and the order matters.\n\n        1. Assign every anchor to 0 (negative)\n        2. (For each gt_bboxes) Compute ignore flags based on ignore_region\n           then assign -1 to anchors w.r.t. 
ignore flags\n        3. (For each gt_bboxes) Compute pos flags based on center_region then\n           assign gt_bboxes to anchors w.r.t. pos flags\n        4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor\n           level then assign -1 to anchors w.r.t. ignore flags\n        5. Assign anchor outside of image to -1\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            img_meta (dict): Meta info of image.\n            featmap_sizes (list[tuple[int, int]]): Feature map size each level.\n            num_level_anchors (list[int]): The number of anchors in each level.\n            anchor_scale (int): Scale of the anchor.\n            anchor_strides (list[int]): Stride of the anchor.\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n            allowed_border (int, optional): The border to allow the valid\n                anchor. Defaults to 0.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        if gt_instances_ignore is not None:\n            raise NotImplementedError\n\n        num_gts = len(gt_instances)\n        num_bboxes = len(pred_instances)\n\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        flat_anchors = pred_instances.priors\n        flat_valid_flags = pred_instances.valid_flags\n        mlvl_anchors = torch.split(flat_anchors, num_level_anchors)\n\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))\n            assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),\n                                                   dtype=torch.long)\n            assigned_labels = gt_bboxes.new_full((num_bboxes, ),\n                                                 -1,\n                                                 dtype=torch.long)\n            return AssignResult(\n                num_gts=num_gts,\n                gt_inds=assigned_gt_inds,\n                max_overlaps=max_overlaps,\n                labels=assigned_labels)\n\n        num_lvls = len(mlvl_anchors)\n        r1 = (1 - self.center_ratio) / 2\n        r2 = (1 - self.ignore_ratio) / 2\n\n        scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                           (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n        min_anchor_size = scale.new_full(\n            (1, ), float(anchor_scale * anchor_strides[0]))\n        target_lvls = torch.floor(\n            torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n        target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n\n        # 1. 
assign 0 (negative) by default\n        mlvl_assigned_gt_inds = []\n        mlvl_ignore_flags = []\n        for lvl in range(num_lvls):\n            assigned_gt_inds = gt_bboxes.new_full((num_level_anchors[lvl], ),\n                                                  0,\n                                                  dtype=torch.long)\n            ignore_flags = torch.zeros_like(assigned_gt_inds)\n            mlvl_assigned_gt_inds.append(assigned_gt_inds)\n            mlvl_ignore_flags.append(ignore_flags)\n\n        for gt_id in range(num_gts):\n            lvl = target_lvls[gt_id].item()\n            featmap_size = featmap_sizes[lvl]\n            stride = anchor_strides[lvl]\n            anchors = mlvl_anchors[lvl]\n            gt_bbox = gt_bboxes[gt_id, :4]\n\n            # Compute regions\n            ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)\n            ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)\n\n            # 2. Assign -1 to ignore flags\n            ignore_flags = anchor_ctr_inside_region_flags(\n                anchors, stride, ignore_region)\n            mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n            # 3. Assign gt_bboxes to pos flags\n            pos_flags = anchor_ctr_inside_region_flags(anchors, stride,\n                                                       ctr_region)\n            mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1\n\n            # 4. Assign -1 to ignore adjacent lvl\n            if lvl > 0:\n                d_lvl = lvl - 1\n                d_anchors = mlvl_anchors[d_lvl]\n                d_featmap_size = featmap_sizes[d_lvl]\n                d_stride = anchor_strides[d_lvl]\n                d_ignore_region = calc_region(gt_bbox, r2, d_stride,\n                                              d_featmap_size)\n                ignore_flags = anchor_ctr_inside_region_flags(\n                    d_anchors, d_stride, d_ignore_region)\n                mlvl_ignore_flags[d_lvl][ignore_flags] = 1\n            if lvl < num_lvls - 1:\n                u_lvl = lvl + 1\n                u_anchors = mlvl_anchors[u_lvl]\n                u_featmap_size = featmap_sizes[u_lvl]\n                u_stride = anchor_strides[u_lvl]\n                u_ignore_region = calc_region(gt_bbox, r2, u_stride,\n                                              u_featmap_size)\n                ignore_flags = anchor_ctr_inside_region_flags(\n                    u_anchors, u_stride, u_ignore_region)\n                mlvl_ignore_flags[u_lvl][ignore_flags] = 1\n\n        # 4. (cont.) Assign -1 to ignore adjacent lvl\n        for lvl in range(num_lvls):\n            ignore_flags = mlvl_ignore_flags[lvl]\n            mlvl_assigned_gt_inds[lvl][ignore_flags == 1] = -1\n\n        # 5. 
Assign -1 to anchor outside of image\n        flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)\n        assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==\n                flat_valid_flags.shape[0])\n        inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,\n                                           img_meta['img_shape'],\n                                           allowed_border)\n        outside_flags = ~inside_flags\n        flat_assigned_gt_inds[outside_flags] = -1\n\n        assigned_labels = torch.zeros_like(flat_assigned_gt_inds)\n        pos_flags = flat_assigned_gt_inds > 0\n        assigned_labels[pos_flags] = gt_labels[flat_assigned_gt_inds[pos_flags]\n                                               - 1]\n\n        return AssignResult(\n            num_gts=num_gts,\n            gt_inds=flat_assigned_gt_inds,\n            max_overlaps=None,\n            labels=assigned_labels)\n"
  },
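The full `RegionAssigner.assign` call needs image meta, multi-level anchors and valid flags, so the short sketch below only exercises the two module-level helpers, `calc_region` and `anchor_ctr_inside_region_flags`. The numbers are illustrative; `ratio=0.4` corresponds to `r1 = (1 - center_ratio) / 2` with the default `center_ratio=0.2`:

```python
import torch
from mmdet.models.task_modules.assigners.region_assigner import (
    anchor_ctr_inside_region_flags, calc_region)

gt_bbox = torch.tensor([32., 32., 96., 96.])
# Center region of the gt projected onto a stride-8 feature map.
region = calc_region(gt_bbox, ratio=0.4, stride=8, featmap_size=(32, 32))

anchors = torch.tensor([[56., 56., 72., 72.],   # centered on the gt
                        [0., 0., 16., 16.]])    # far away from the gt
flags = anchor_ctr_inside_region_flags(anchors, 8, region)
print(flags)  # tensor([ True, False]) for these toy values
```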
  {
    "path": "mmdet/models/task_modules/assigners/sim_ota_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\nINF = 100000.0\nEPS = 1.0e-7\n\n\n@TASK_UTILS.register_module()\nclass SimOTAAssigner(BaseAssigner):\n    \"\"\"Computes matching between predictions and ground truth.\n\n    Args:\n        center_radius (float): Ground truth center size\n            to judge whether a prior is in center. Defaults to 2.5.\n        candidate_topk (int): The candidate top-k which used to\n            get top-k ious to calculate dynamic-k. Defaults to 10.\n        iou_weight (float): The scale factor for regression\n            iou cost. Defaults to 3.0.\n        cls_weight (float): The scale factor for classification\n            cost. Defaults to 1.0.\n        iou_calculator (ConfigType): Config of overlaps Calculator.\n            Defaults to dict(type='BboxOverlaps2D').\n    \"\"\"\n\n    def __init__(self,\n                 center_radius: float = 2.5,\n                 candidate_topk: int = 10,\n                 iou_weight: float = 3.0,\n                 cls_weight: float = 1.0,\n                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D')):\n        self.center_radius = center_radius\n        self.candidate_topk = candidate_topk\n        self.iou_weight = iou_weight\n        self.cls_weight = cls_weight\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to priors using SimOTA.\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n        Returns:\n            obj:`AssignResult`: The assigned result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        num_gt = gt_bboxes.size(0)\n\n        decoded_bboxes = pred_instances.bboxes\n        pred_scores = pred_instances.scores\n        priors = pred_instances.priors\n        num_bboxes = decoded_bboxes.size(0)\n\n        # assign 0 by default\n        assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ),\n                                                   0,\n                                                   dtype=torch.long)\n        if num_gt == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n            assigned_labels = decoded_bboxes.new_full((num_bboxes, ),\n                                                      -1,\n                                                      dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(\n            priors, gt_bboxes)\n        valid_decoded_bbox = decoded_bboxes[valid_mask]\n        valid_pred_scores = pred_scores[valid_mask]\n        num_valid = valid_decoded_bbox.size(0)\n        if num_valid == 0:\n            # No valid bboxes, return empty assignment\n            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n            assigned_labels = decoded_bboxes.new_full((num_bboxes, ),\n                                                      -1,\n                                                      dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes)\n        iou_cost = -torch.log(pairwise_ious + EPS)\n\n        gt_onehot_label = (\n            F.one_hot(gt_labels.to(torch.int64),\n                      pred_scores.shape[-1]).float().unsqueeze(0).repeat(\n                          num_valid, 1, 1))\n\n        valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)\n        # disable AMP autocast and calculate BCE with FP32 to avoid overflow\n        with torch.cuda.amp.autocast(enabled=False):\n            cls_cost = (\n                F.binary_cross_entropy(\n                    valid_pred_scores.to(dtype=torch.float32),\n                    gt_onehot_label,\n                    reduction='none',\n                ).sum(-1).to(dtype=valid_pred_scores.dtype))\n\n        cost_matrix = (\n            cls_cost * self.cls_weight + iou_cost * self.iou_weight +\n            (~is_in_boxes_and_center) * INF)\n\n        matched_pred_ious, matched_gt_inds = \\\n            self.dynamic_k_matching(\n                cost_matrix, pairwise_ious, num_gt, valid_mask)\n\n        # convert to AssignResult format\n        assigned_gt_inds[valid_mask] = matched_gt_inds + 1\n        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n        assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()\n        max_overlaps = assigned_gt_inds.new_full((num_bboxes, ),\n                                                 -INF,\n                                                 dtype=torch.float32)\n        
max_overlaps[valid_mask] = matched_pred_ious\n        return AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n    def get_in_gt_and_in_center_info(\n            self, priors: Tensor, gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Get the information of which prior is in gt bboxes and gt center\n        priors.\"\"\"\n        num_gt = gt_bboxes.size(0)\n\n        repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt)\n        repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt)\n        repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt)\n        repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt)\n\n        # is prior centers in gt bboxes, shape: [n_prior, n_gt]\n        l_ = repeated_x - gt_bboxes[:, 0]\n        t_ = repeated_y - gt_bboxes[:, 1]\n        r_ = gt_bboxes[:, 2] - repeated_x\n        b_ = gt_bboxes[:, 3] - repeated_y\n\n        deltas = torch.stack([l_, t_, r_, b_], dim=1)\n        is_in_gts = deltas.min(dim=1).values > 0\n        is_in_gts_all = is_in_gts.sum(dim=1) > 0\n\n        # is prior centers in gt centers\n        gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0\n        gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0\n        ct_box_l = gt_cxs - self.center_radius * repeated_stride_x\n        ct_box_t = gt_cys - self.center_radius * repeated_stride_y\n        ct_box_r = gt_cxs + self.center_radius * repeated_stride_x\n        ct_box_b = gt_cys + self.center_radius * repeated_stride_y\n\n        cl_ = repeated_x - ct_box_l\n        ct_ = repeated_y - ct_box_t\n        cr_ = ct_box_r - repeated_x\n        cb_ = ct_box_b - repeated_y\n\n        ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1)\n        is_in_cts = ct_deltas.min(dim=1).values > 0\n        is_in_cts_all = is_in_cts.sum(dim=1) > 0\n\n        # in boxes or in centers, shape: [num_priors]\n        is_in_gts_or_centers = is_in_gts_all | is_in_cts_all\n\n        # both in boxes and centers, shape: [num_fg, num_gt]\n        is_in_boxes_and_centers = (\n            is_in_gts[is_in_gts_or_centers, :]\n            & is_in_cts[is_in_gts_or_centers, :])\n        return is_in_gts_or_centers, is_in_boxes_and_centers\n\n    def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor,\n                           num_gt: int,\n                           valid_mask: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Use IoU and matching cost to calculate the dynamic top-k positive\n        targets.\"\"\"\n        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)\n        # select candidate topk ious for dynamic-k calculation\n        candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))\n        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)\n        # calculate dynamic k for each gt\n        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(\n                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)\n            matching_matrix[:, gt_idx][pos_idx] = 1\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        prior_match_gt_mask = matching_matrix.sum(1) > 1\n        if prior_match_gt_mask.sum() > 0:\n            cost_min, cost_argmin = torch.min(\n                cost[prior_match_gt_mask, :], dim=1)\n            matching_matrix[prior_match_gt_mask, :] *= 0\n            matching_matrix[prior_match_gt_mask, cost_argmin] = 1\n        # get foreground mask inside box and center prior\n        
fg_mask_inboxes = matching_matrix.sum(1) > 0\n        valid_mask[valid_mask.clone()] = fg_mask_inboxes\n\n        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)\n        matched_pred_ious = (matching_matrix *\n                             pairwise_ious).sum(1)[fg_mask_inboxes]\n        return matched_pred_ious, matched_gt_inds\n"
  },
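A toy, torch-only sketch of SimOTA's dynamic-k rule as implemented in `dynamic_k_matching` above: for each gt, k is the (clamped) integer sum of its top `candidate_topk` IoUs over the valid priors. The IoU matrix here is made up for illustration:

```python
import torch

pairwise_ious = torch.tensor([[0.80, 0.10],
                              [0.60, 0.20],
                              [0.30, 0.05],
                              [0.10, 0.40]])      # (num_valid_priors, num_gt)
candidate_topk = min(10, pairwise_ious.size(0))   # same clamp as the assigner
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks)  # tensor([1, 1]): each gt keeps its single cheapest prior
```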
  {
    "path": "mmdet/models/task_modules/assigners/task_aligned_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\nINF = 100000000\n\n\n@TASK_UTILS.register_module()\nclass TaskAlignedAssigner(BaseAssigner):\n    \"\"\"Task aligned assigner used in the paper:\n    `TOOD: Task-aligned One-stage Object Detection.\n    <https://arxiv.org/abs/2108.07755>`_.\n\n    Assign a corresponding gt bbox or background to each predicted bbox.\n    Each bbox will be assigned with `0` or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        topk (int): number of bbox selected in each level\n        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou\n            calculator. Defaults to ``dict(type='BboxOverlaps2D')``\n    \"\"\"\n\n    def __init__(self,\n                 topk: int,\n                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D')):\n        assert topk >= 1\n        self.topk = topk\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               alpha: int = 1,\n               beta: int = 6) -> AssignResult:\n        \"\"\"Assign gt to bboxes.\n\n        The assignment is done in following steps\n\n        1. compute alignment metric between all bbox (bbox of all pyramid\n           levels) and gt\n        2. select top-k bbox as candidates for each gt\n        3. limit the positive sample's center in gt (because the anchor-free\n           detector only can predict positive distance)\n\n\n        Args:\n            pred_instances (:obj:`InstaceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors, points, or bboxes predicted by the model,\n                shape(n, 4).\n            gt_instances (:obj:`InstaceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            gt_instances_ignore (:obj:`InstaceData`, optional): Instances\n                to be ignored during training. 
It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n            alpha (int): Hyper-parameters related to alignment_metrics.\n                Defaults to 1.\n            beta (int): Hyper-parameters related to alignment_metrics.\n                Defaults to 6.\n\n        Returns:\n            :obj:`TaskAlignedAssignResult`: The assign result.\n        \"\"\"\n        priors = pred_instances.priors\n        decode_bboxes = pred_instances.bboxes\n        pred_scores = pred_instances.scores\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n\n        priors = priors[:, :4]\n        num_gt, num_bboxes = gt_bboxes.size(0), priors.size(0)\n        # compute alignment metric between all bbox and gt\n        overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach()\n        bbox_scores = pred_scores[:, gt_labels].detach()\n        # assign 0 by default\n        assigned_gt_inds = priors.new_full((num_bboxes, ), 0, dtype=torch.long)\n        assign_metrics = priors.new_zeros((num_bboxes, ))\n\n        if num_gt == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = priors.new_zeros((num_bboxes, ))\n            if num_gt == 0:\n                # No gt boxes, assign everything to background\n                assigned_gt_inds[:] = 0\n            assigned_labels = priors.new_full((num_bboxes, ),\n                                              -1,\n                                              dtype=torch.long)\n            assign_result = AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n            assign_result.assign_metrics = assign_metrics\n            return assign_result\n\n        # select top-k bboxes as candidates for each gt\n        alignment_metrics = bbox_scores**alpha * overlaps**beta\n        topk = min(self.topk, alignment_metrics.size(0))\n        _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)\n        candidate_metrics = alignment_metrics[candidate_idxs,\n                                              torch.arange(num_gt)]\n        is_pos = candidate_metrics > 0\n\n        # limit the positive sample's center in gt\n        priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0\n        priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0\n        for gt_idx in range(num_gt):\n            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n        ep_priors_cx = priors_cx.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        ep_priors_cy = priors_cy.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        candidate_idxs = candidate_idxs.view(-1)\n\n        # calculate the left, top, right, bottom distance between positive\n        # bbox center and gt side\n        l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n        t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n        r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt)\n        b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt)\n        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n        is_pos = is_pos & is_in_gts\n\n        # if an anchor box is assigned to multiple gts,\n        # the one with the highest iou will be selected.\n        overlaps_inf = torch.full_like(overlaps,\n                                       
-INF).t().contiguous().view(-1)\n        index = candidate_idxs.view(-1)[is_pos.view(-1)]\n        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n        overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n        assigned_gt_inds[\n            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n        assign_metrics[max_overlaps != -INF] = alignment_metrics[\n            max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]]\n\n        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n        pos_inds = torch.nonzero(\n            assigned_gt_inds > 0, as_tuple=False).squeeze()\n        if pos_inds.numel() > 0:\n            assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -\n                                                  1]\n        assign_result = AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n        assign_result.assign_metrics = assign_metrics\n        return assign_result\n"
  },
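A small sketch of the task-aligned metric used above, t = s**alpha * u**beta, where s is the classification score of the gt class and u the predicted IoU. The scores, IoUs, and the top-k value (13 is the value commonly used with TOOD, an assumption here) are illustrative:

```python
import torch

alpha, beta = 1, 6
bbox_scores = torch.tensor([[0.9], [0.2], [0.7]])  # (num_priors, num_gt)
overlaps = torch.tensor([[0.6], [0.8], [0.5]])     # predicted IoU with the gt

alignment_metrics = bbox_scores**alpha * overlaps**beta
topk = min(13, alignment_metrics.size(0))
_, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)
print(alignment_metrics.squeeze())  # high score alone does not win; IoU dominates
print(candidate_idxs.squeeze())     # candidate ranking per gt
```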
  {
    "path": "mmdet/models/task_modules/assigners/uniform_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import bbox_xyxy_to_cxcywh\nfrom mmdet.utils import ConfigType\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@TASK_UTILS.register_module()\nclass UniformAssigner(BaseAssigner):\n    \"\"\"Uniform Matching between the priors and gt boxes, which can achieve\n    balance in positive priors, and gt_bboxes_ignore was not considered for\n    now.\n\n    Args:\n        pos_ignore_thr (float): the threshold to ignore positive priors\n        neg_ignore_thr (float): the threshold to ignore negative priors\n        match_times(int): Number of positive priors for each gt box.\n           Defaults to 4.\n        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou\n            calculator. Defaults to ``dict(type='BboxOverlaps2D')``\n    \"\"\"\n\n    def __init__(self,\n                 pos_ignore_thr: float,\n                 neg_ignore_thr: float,\n                 match_times: int = 4,\n                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D')):\n        self.match_times = match_times\n        self.pos_ignore_thr = pos_ignore_thr\n        self.neg_ignore_thr = neg_ignore_thr\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def assign(\n            self,\n            pred_instances: InstanceData,\n            gt_instances: InstanceData,\n            gt_instances_ignore: Optional[InstanceData] = None\n    ) -> AssignResult:\n        \"\"\"Assign gt to priors.\n\n        The assignment is done in following steps\n\n        1. assign -1 by default\n        2. compute the L1 cost between boxes. Note that we use priors and\n           predict boxes both\n        3. compute the ignore indexes use gt_bboxes and predict boxes\n        4. compute the ignore indexes of positive sample use priors and\n           predict boxes\n\n\n        Args:\n            pred_instances (:obj:`InstaceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be priors, points, or bboxes predicted by the model,\n                shape(n, 4).\n            gt_instances (:obj:`InstaceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            gt_instances_ignore (:obj:`InstaceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n\n        gt_bboxes = gt_instances.bboxes\n        gt_labels = gt_instances.labels\n        priors = pred_instances.priors\n        bbox_pred = pred_instances.decoder_priors\n\n        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n        # 1. 
assign -1 by default\n        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),\n                                              0,\n                                              dtype=torch.long)\n        assigned_labels = bbox_pred.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            if num_gts == 0:\n                # No ground truth, assign all to background\n                assigned_gt_inds[:] = 0\n            assign_result = AssignResult(\n                num_gts, assigned_gt_inds, None, labels=assigned_labels)\n            assign_result.set_extra_property(\n                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))\n            assign_result.set_extra_property('pos_predicted_boxes',\n                                             bbox_pred.new_empty((0, 4)))\n            assign_result.set_extra_property('target_boxes',\n                                             bbox_pred.new_empty((0, 4)))\n            return assign_result\n\n        # 2. Compute the L1 cost between boxes\n        # Note that we use priors and predict boxes both\n        cost_bbox = torch.cdist(\n            bbox_xyxy_to_cxcywh(bbox_pred),\n            bbox_xyxy_to_cxcywh(gt_bboxes),\n            p=1)\n        cost_bbox_priors = torch.cdist(\n            bbox_xyxy_to_cxcywh(priors), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)\n\n        # We found that topk function has different results in cpu and\n        # cuda mode. In order to ensure consistency with the source code,\n        # we also use cpu mode.\n        # TODO: Check whether the performance of cpu and cuda are the same.\n        C = cost_bbox.cpu()\n        C1 = cost_bbox_priors.cpu()\n\n        # self.match_times x n\n        index = torch.topk(\n            C,  # c=b,n,x c[i]=n,x\n            k=self.match_times,\n            dim=0,\n            largest=False)[1]\n\n        # self.match_times x n\n        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]\n        # (self.match_times*2) x n\n        indexes = torch.cat((index, index1),\n                            dim=1).reshape(-1).to(bbox_pred.device)\n\n        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)\n        anchor_overlaps = self.iou_calculator(priors, gt_bboxes)\n        pred_max_overlaps, _ = pred_overlaps.max(dim=1)\n        anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)\n\n        # 3. Compute the ignore indexes use gt_bboxes and predict boxes\n        ignore_idx = pred_max_overlaps > self.neg_ignore_thr\n        assigned_gt_inds[ignore_idx] = -1\n\n        # 4. 
Compute the ignore indexes of positive sample use priors\n        # and predict boxes\n        pos_gt_index = torch.arange(\n            0, C1.size(1),\n            device=bbox_pred.device).repeat(self.match_times * 2)\n        pos_ious = anchor_overlaps[indexes, pos_gt_index]\n        pos_ignore_idx = pos_ious < self.pos_ignore_thr\n\n        pos_gt_index_with_ignore = pos_gt_index + 1\n        pos_gt_index_with_ignore[pos_ignore_idx] = -1\n        assigned_gt_inds[indexes] = pos_gt_index_with_ignore\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n\n        assign_result = AssignResult(\n            num_gts,\n            assigned_gt_inds,\n            anchor_max_overlaps,\n            labels=assigned_labels)\n        assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)\n        assign_result.set_extra_property('pos_predicted_boxes',\n                                         bbox_pred[indexes])\n        assign_result.set_extra_property('target_boxes',\n                                         gt_bboxes[pos_gt_index])\n        return assign_result\n"
  },
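A toy sketch of the uniform-matching step (step 2 of the docstring): each gt takes the `match_times` predictions with the smallest L1 cost in cxcywh space. Boxes are made up; note the assigner itself runs `topk` on CPU for reproducibility:

```python
import torch
from mmdet.structures.bbox import bbox_xyxy_to_cxcywh

match_times = 2
bbox_pred = torch.tensor([[0., 0., 10., 10.],
                          [2., 2., 12., 12.],
                          [40., 40., 60., 60.]])
gt_bboxes = torch.tensor([[1., 1., 11., 11.]])

cost = torch.cdist(bbox_xyxy_to_cxcywh(bbox_pred),
                   bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
index = torch.topk(cost, k=match_times, dim=0, largest=False)[1]
print(index.squeeze(1))  # the two predictions nearest the gt (indices 0 and 1)
```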
  {
    "path": "mmdet/models/task_modules/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nfrom mmdet.registry import TASK_UTILS\n\nPRIOR_GENERATORS = TASK_UTILS\nANCHOR_GENERATORS = TASK_UTILS\nBBOX_ASSIGNERS = TASK_UTILS\nBBOX_SAMPLERS = TASK_UTILS\nBBOX_CODERS = TASK_UTILS\nMATCH_COSTS = TASK_UTILS\nIOU_CALCULATORS = TASK_UTILS\n\n\ndef build_bbox_coder(cfg, **default_args):\n    \"\"\"Builder of box coder.\"\"\"\n    warnings.warn('``build_sampler`` would be deprecated soon, please use '\n                  '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_iou_calculator(cfg, default_args=None):\n    \"\"\"Builder of IoU calculator.\"\"\"\n    warnings.warn(\n        '``build_iou_calculator`` would be deprecated soon, please use '\n        '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_match_cost(cfg, default_args=None):\n    \"\"\"Builder of IoU calculator.\"\"\"\n    warnings.warn('``build_match_cost`` would be deprecated soon, please use '\n                  '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_assigner(cfg, **default_args):\n    \"\"\"Builder of box assigner.\"\"\"\n    warnings.warn('``build_assigner`` would be deprecated soon, please use '\n                  '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_sampler(cfg, **default_args):\n    \"\"\"Builder of box sampler.\"\"\"\n    warnings.warn('``build_sampler`` would be deprecated soon, please use '\n                  '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_prior_generator(cfg, default_args=None):\n    warnings.warn(\n        '``build_prior_generator`` would be deprecated soon, please use '\n        '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n\n\ndef build_anchor_generator(cfg, default_args=None):\n    warnings.warn(\n        '``build_anchor_generator`` would be deprecated soon, please use '\n        '``mmdet.registry.TASK_UTILS.build()`` ')\n    return TASK_UTILS.build(cfg, default_args=default_args)\n"
  },
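As the deprecation warnings in builder.py suggest, new code should build task modules through the registry directly. A minimal sketch, assuming mmdet's modules have been registered first (for example via `register_all_modules`) and using illustrative MaxIoUAssigner thresholds:

```python
from mmdet.registry import TASK_UTILS
from mmdet.utils import register_all_modules

# Register mmdet modules so the registry can resolve the `type` strings below.
register_all_modules(init_default_scope=True)

# Replaces the deprecated build_assigner / build_iou_calculator helpers.
assigner = TASK_UTILS.build(
    dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3,
         min_pos_iou=0.3))
iou_calculator = TASK_UTILS.build(dict(type='BboxOverlaps2D'))
```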
  {
    "path": "mmdet/models/task_modules/coders/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_bbox_coder import BaseBBoxCoder\nfrom .bucketing_bbox_coder import BucketingBBoxCoder\nfrom .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder\nfrom .distance_point_bbox_coder import DistancePointBBoxCoder\nfrom .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder\nfrom .pseudo_bbox_coder import PseudoBBoxCoder\nfrom .tblr_bbox_coder import TBLRBBoxCoder\nfrom .yolo_bbox_coder import YOLOBBoxCoder\n\n__all__ = [\n    'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',\n    'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',\n    'BucketingBBoxCoder', 'DistancePointBBoxCoder'\n]\n"
  },
  {
    "path": "mmdet/models/task_modules/coders/base_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseBBoxCoder(metaclass=ABCMeta):\n    \"\"\"Base bounding box coder.\n\n    Args:\n        use_box_type (bool): Whether to warp decoded boxes with the\n            box type data structure. Defaults to False.\n    \"\"\"\n\n    # The size of the last of dimension of the encoded tensor.\n    encode_size = 4\n\n    def __init__(self, use_box_type: bool = False, **kwargs):\n        self.use_box_type = use_box_type\n\n    @abstractmethod\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Encode deltas between bboxes and ground truth boxes.\"\"\"\n\n    @abstractmethod\n    def decode(self, bboxes, bboxes_pred):\n        \"\"\"Decode the predicted bboxes according to prediction and base\n        boxes.\"\"\"\n"
  },
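A hypothetical minimal subclass, not part of the repo, to illustrate the encode/decode contract that `BaseBBoxCoder` defines (it behaves like the pseudo coder: deltas are the gt boxes themselves):

```python
from mmdet.models.task_modules.coders import BaseBBoxCoder
from mmdet.registry import TASK_UTILS


@TASK_UTILS.register_module()
class IdentityBBoxCoder(BaseBBoxCoder):
    """Illustrative coder: the 'delta' is the target box itself."""

    def encode(self, bboxes, gt_bboxes):
        # No transformation: regression targets are the gt boxes.
        return gt_bboxes

    def decode(self, bboxes, bboxes_pred):
        # Predictions are already absolute boxes.
        return bboxes_pred
```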
  {
    "path": "mmdet/models/task_modules/coders/bucketing_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, bbox_rescale, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass BucketingBBoxCoder(BaseBBoxCoder):\n    \"\"\"Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).\n\n    Boundary Localization with Bucketing and Bucketing Guided Rescoring\n    are implemented here.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        num_buckets (int): Number of buckets.\n        scale_factor (int): Scale factor of proposals to generate buckets.\n        offset_topk (int): Topk buckets are used to generate\n             bucket fine regression targets. Defaults to 2.\n        offset_upperbound (float): Offset upperbound to generate\n             bucket fine regression targets.\n             To avoid too large offset displacements. Defaults to 1.0.\n        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.\n             Defaults to True.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num_buckets,\n                 scale_factor,\n                 offset_topk=2,\n                 offset_upperbound=1.0,\n                 cls_ignore_neighbor=True,\n                 clip_border=True,\n                 **kwargs):\n        super().__init__(**kwargs)\n        self.num_buckets = num_buckets\n        self.scale_factor = scale_factor\n        self.offset_topk = offset_topk\n        self.offset_upperbound = offset_upperbound\n        self.cls_ignore_neighbor = cls_ignore_neighbor\n        self.clip_border = clip_border\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get bucketing estimation and fine regression targets during\n        training.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,\n                e.g., object proposals.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the\n                transformation, e.g., ground truth boxes.\n\n        Returns:\n           encoded_bboxes(tuple[Tensor]): bucketing estimation\n            and fine regression targets and weights\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,\n                                     self.scale_factor, self.offset_topk,\n                                     self.offset_upperbound,\n                                     self.cls_ignore_neighbor)\n        return encoded_bboxes\n\n    def decode(self, bboxes, pred_bboxes, max_shape=None):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n        Args:\n            boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.\n            pred_bboxes (torch.Tensor): Predictions for bucketing estimation\n                and fine regression\n            max_shape (tuple[int], optional): Maximum shape of boxes.\n                Defaults to None.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        assert 
len(pred_bboxes) == 2\n        cls_preds, offset_preds = pred_bboxes\n        assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(\n            0) == bboxes.size(0)\n        bboxes, loc_confidence = bucket2bbox(bboxes, cls_preds, offset_preds,\n                                             self.num_buckets,\n                                             self.scale_factor, max_shape,\n                                             self.clip_border)\n        if self.use_box_type:\n            bboxes = HorizontalBoxes(bboxes, clone=False)\n        return bboxes, loc_confidence\n\n\ndef generat_buckets(proposals, num_buckets, scale_factor=1.0):\n    \"\"\"Generate buckets w.r.t bucket number and scale factor of proposals.\n\n    Args:\n        proposals (Tensor): Shape (n, 4)\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n\n    Returns:\n        tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,\n         t_buckets, d_buckets)\n\n            - bucket_w: Width of buckets on x-axis. Shape (n, ).\n            - bucket_h: Height of buckets on y-axis. Shape (n, ).\n            - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).\n            - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).\n            - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).\n            - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).\n    \"\"\"\n    proposals = bbox_rescale(proposals, scale_factor)\n\n    # number of buckets in each side\n    side_num = int(np.ceil(num_buckets / 2.0))\n    pw = proposals[..., 2] - proposals[..., 0]\n    ph = proposals[..., 3] - proposals[..., 1]\n    px1 = proposals[..., 0]\n    py1 = proposals[..., 1]\n    px2 = proposals[..., 2]\n    py2 = proposals[..., 3]\n\n    bucket_w = pw / num_buckets\n    bucket_h = ph / num_buckets\n\n    # left buckets\n    l_buckets = px1[:, None] + (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]\n    # right buckets\n    r_buckets = px2[:, None] - (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]\n    # top buckets\n    t_buckets = py1[:, None] + (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]\n    # down buckets\n    d_buckets = py2[:, None] - (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]\n    return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets\n\n\ndef bbox2bucket(proposals,\n                gt,\n                num_buckets,\n                scale_factor,\n                offset_topk=2,\n                offset_upperbound=1.0,\n                cls_ignore_neighbor=True):\n    \"\"\"Generate buckets estimation and fine regression targets.\n\n    Args:\n        proposals (Tensor): Shape (n, 4)\n        gt (Tensor): Shape (n, 4)\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n        offset_topk (int): Topk buckets are used to generate\n             bucket fine regression targets. Defaults to 2.\n        offset_upperbound (float): Offset allowance to generate\n             bucket fine regression targets.\n             To avoid too large offset displacements. 
Defaults to 1.0.\n        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.\n             Defaults to True.\n\n    Returns:\n        tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).\n\n            - offsets: Fine regression targets. \\\n                Shape (n, num_buckets*2).\n            - offsets_weights: Fine regression weights. \\\n                Shape (n, num_buckets*2).\n            - bucket_labels: Bucketing estimation labels. \\\n                Shape (n, num_buckets*2).\n            - cls_weights: Bucketing estimation weights. \\\n                Shape (n, num_buckets*2).\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    # generate buckets\n    proposals = proposals.float()\n    gt = gt.float()\n    (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,\n     d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)\n\n    gx1 = gt[..., 0]\n    gy1 = gt[..., 1]\n    gx2 = gt[..., 2]\n    gy2 = gt[..., 3]\n\n    # generate offset targets and weights\n    # offsets from buckets to gts\n    l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]\n    r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]\n    t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]\n    d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]\n\n    # select top-k nearest buckets\n    l_topk, l_label = l_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    r_topk, r_label = r_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    t_topk, t_label = t_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    d_topk, d_label = d_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n\n    offset_l_weights = l_offsets.new_zeros(l_offsets.size())\n    offset_r_weights = r_offsets.new_zeros(r_offsets.size())\n    offset_t_weights = t_offsets.new_zeros(t_offsets.size())\n    offset_d_weights = d_offsets.new_zeros(d_offsets.size())\n    inds = torch.arange(0, proposals.size(0)).to(proposals).long()\n\n    # generate offset weights of top-k nearest buckets\n    for k in range(offset_topk):\n        if k >= 1:\n            offset_l_weights[inds, l_label[:,\n                                           k]] = (l_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_r_weights[inds, r_label[:,\n                                           k]] = (r_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_t_weights[inds, t_label[:,\n                                           k]] = (t_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_d_weights[inds, d_label[:,\n                                           k]] = (d_topk[:, k] <\n                                                  offset_upperbound).float()\n        else:\n            offset_l_weights[inds, l_label[:, k]] = 1.0\n            offset_r_weights[inds, r_label[:, k]] = 1.0\n            offset_t_weights[inds, t_label[:, k]] = 1.0\n            offset_d_weights[inds, d_label[:, k]] = 1.0\n\n    offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)\n    offsets_weights = torch.cat([\n        offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights\n    ],\n                                dim=-1)\n\n    # generate bucket labels and weight\n    side_num = int(np.ceil(num_buckets / 2.0))\n    labels = 
torch.stack(\n        [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)\n\n    batch_size = labels.size(0)\n    bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,\n                                                              -1).float()\n    bucket_cls_l_weights = (l_offsets.abs() < 1).float()\n    bucket_cls_r_weights = (r_offsets.abs() < 1).float()\n    bucket_cls_t_weights = (t_offsets.abs() < 1).float()\n    bucket_cls_d_weights = (d_offsets.abs() < 1).float()\n    bucket_cls_weights = torch.cat([\n        bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,\n        bucket_cls_d_weights\n    ],\n                                   dim=-1)\n    # ignore second nearest buckets for cls if necessary\n    if cls_ignore_neighbor:\n        bucket_cls_weights = (~((bucket_cls_weights == 1) &\n                                (bucket_labels == 0))).float()\n    else:\n        bucket_cls_weights[:] = 1.0\n    return offsets, offsets_weights, bucket_labels, bucket_cls_weights\n\n\ndef bucket2bbox(proposals,\n                cls_preds,\n                offset_preds,\n                num_buckets,\n                scale_factor=1.0,\n                max_shape=None,\n                clip_border=True):\n    \"\"\"Apply bucketing estimation (cls preds) and fine regression (offset\n    preds) to generate det bboxes.\n\n    Args:\n        proposals (Tensor): Boxes to be transformed. Shape (n, 4)\n        cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).\n        offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n        max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n\n    Returns:\n        tuple[Tensor]: (bboxes, loc_confidence).\n\n            - bboxes: predicted bboxes. 
Shape (n, 4)\n            - loc_confidence: localization confidence of predicted bboxes.\n                Shape (n,).\n    \"\"\"\n\n    side_num = int(np.ceil(num_buckets / 2.0))\n    cls_preds = cls_preds.view(-1, side_num)\n    offset_preds = offset_preds.view(-1, side_num)\n\n    scores = F.softmax(cls_preds, dim=1)\n    score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)\n\n    rescaled_proposals = bbox_rescale(proposals, scale_factor)\n\n    pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]\n    ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]\n    px1 = rescaled_proposals[..., 0]\n    py1 = rescaled_proposals[..., 1]\n    px2 = rescaled_proposals[..., 2]\n    py2 = rescaled_proposals[..., 3]\n\n    bucket_w = pw / num_buckets\n    bucket_h = ph / num_buckets\n\n    score_inds_l = score_label[0::4, 0]\n    score_inds_r = score_label[1::4, 0]\n    score_inds_t = score_label[2::4, 0]\n    score_inds_d = score_label[3::4, 0]\n    l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w\n    r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w\n    t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h\n    d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h\n\n    offsets = offset_preds.view(-1, 4, side_num)\n    inds = torch.arange(proposals.size(0)).to(proposals).long()\n    l_offsets = offsets[:, 0, :][inds, score_inds_l]\n    r_offsets = offsets[:, 1, :][inds, score_inds_r]\n    t_offsets = offsets[:, 2, :][inds, score_inds_t]\n    d_offsets = offsets[:, 3, :][inds, score_inds_d]\n\n    x1 = l_buckets - l_offsets * bucket_w\n    x2 = r_buckets - r_offsets * bucket_w\n    y1 = t_buckets - t_offsets * bucket_h\n    y2 = d_buckets - d_offsets * bucket_h\n\n    if clip_border and max_shape is not None:\n        x1 = x1.clamp(min=0, max=max_shape[1] - 1)\n        y1 = y1.clamp(min=0, max=max_shape[0] - 1)\n        x2 = x2.clamp(min=0, max=max_shape[1] - 1)\n        y2 = y2.clamp(min=0, max=max_shape[0] - 1)\n    bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],\n                       dim=-1)\n\n    # bucketing guided rescoring\n    loc_confidence = score_topk[:, 0]\n    top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1\n    loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()\n    loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)\n\n    return bboxes, loc_confidence\n"
  },
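A short encode sketch for the bucketing coder above, using the num_buckets/scale_factor values commonly paired with SABL (an assumption here) and toy boxes. Each of the four returned tensors has 4 * ceil(num_buckets / 2) columns per proposal:

```python
import torch
from mmdet.models.task_modules.coders import BucketingBBoxCoder

coder = BucketingBBoxCoder(num_buckets=14, scale_factor=1.7)
proposals = torch.tensor([[10., 10., 60., 60.]])
gt_bboxes = torch.tensor([[12., 8., 58., 64.]])

offsets, offset_weights, bucket_labels, cls_weights = coder.encode(
    proposals, gt_bboxes)
print(offsets.shape, bucket_labels.shape)  # torch.Size([1, 28]) twice: 4 sides x 7 buckets
```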
  {
    "path": "mmdet/models/task_modules/coders/delta_xywh_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass DeltaXYWHBBoxCoder(BaseBBoxCoder):\n    \"\"\"Delta XYWH BBox coder.\n\n    Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,\n    this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and\n    decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).\n\n    Args:\n        target_means (Sequence[float]): Denormalizing means of target for\n            delta coordinates\n        target_stds (Sequence[float]): Denormalizing standard deviation of\n            target for delta coordinates\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n        add_ctr_clamp (bool): Whether to add center clamp, when added, the\n            predicted box is clamped is its center is too far away from\n            the original anchor's center. Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n    \"\"\"\n\n    def __init__(self,\n                 target_means=(0., 0., 0., 0.),\n                 target_stds=(1., 1., 1., 1.),\n                 clip_border=True,\n                 add_ctr_clamp=False,\n                 ctr_clamp=32,\n                 **kwargs):\n        super().__init__(**kwargs)\n        self.means = target_means\n        self.stds = target_stds\n        self.clip_border = clip_border\n        self.add_ctr_clamp = add_ctr_clamp\n        self.ctr_clamp = ctr_clamp\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,\n                e.g., object proposals.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the\n                transformation, e.g., ground-truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)\n        return encoded_bboxes\n\n    def decode(self,\n               bboxes,\n               pred_bboxes,\n               max_shape=None,\n               wh_ratio_clip=16 / 1000):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape\n                (B, N, 4) or (N, 4)\n            pred_bboxes (Tensor): Encoded offsets with respect to each roi.\n               Has shape (B, N, num_classes * 4) or (B, N, 4) or\n               (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n               when rois is a grid of anchors.Offset encoding follows [1]_.\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n               Sequence[int]],optional): Maximum bounds for boxes, specifies\n               (H, W, C) or (H, W). 
If bboxes shape is (B, N, 4), then\n               the max_shape should be a Sequence[Sequence[int]]\n               and the length of max_shape should also be B.\n            wh_ratio_clip (float, optional): The allowed ratio between\n                width and height.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        assert pred_bboxes.size(0) == bboxes.size(0)\n        if pred_bboxes.ndim == 3:\n            assert pred_bboxes.size(1) == bboxes.size(1)\n\n        if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():\n            # single image decode\n            decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means,\n                                        self.stds, max_shape, wh_ratio_clip,\n                                        self.clip_border, self.add_ctr_clamp,\n                                        self.ctr_clamp)\n        else:\n            if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():\n                warnings.warn(\n                    'DeprecationWarning: onnx_delta2bbox is deprecated '\n                    'in the case of batch decoding and non-ONNX, '\n                    'please use `delta2bbox` instead. In order to improve '\n                    'the decoding speed, the batch function will no '\n                    'longer be supported. ')\n            decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means,\n                                             self.stds, max_shape,\n                                             wh_ratio_clip, self.clip_border,\n                                             self.add_ctr_clamp,\n                                             self.ctr_clamp)\n\n        if self.use_box_type:\n            assert decoded_bboxes.size(-1) == 4, \\\n                ('Cannot wrap decoded boxes with box type when decoded boxes '\n                 'have shape of (N, num_classes * 4)')\n            decoded_bboxes = HorizontalBoxes(decoded_bboxes)\n        return decoded_bboxes\n\n\ndef bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):\n    \"\"\"Compute deltas of proposals w.r.t. 
gt.\n\n    We usually compute the deltas of x, y, w, h of proposals w.r.t ground\n    truth bboxes to get regression target.\n    This is the inverse function of :func:`delta2bbox`.\n\n    Args:\n        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n        gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n\n    Returns:\n        Tensor: deltas with shape (N, 4), where columns represent dx, dy,\n            dw, dh.\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    proposals = proposals.float()\n    gt = gt.float()\n    px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n    py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n    pw = proposals[..., 2] - proposals[..., 0]\n    ph = proposals[..., 3] - proposals[..., 1]\n\n    gx = (gt[..., 0] + gt[..., 2]) * 0.5\n    gy = (gt[..., 1] + gt[..., 3]) * 0.5\n    gw = gt[..., 2] - gt[..., 0]\n    gh = gt[..., 3] - gt[..., 1]\n\n    dx = (gx - px) / pw\n    dy = (gy - py) / ph\n    dw = torch.log(gw / pw)\n    dh = torch.log(gh / ph)\n    deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n    means = deltas.new_tensor(means).unsqueeze(0)\n    stds = deltas.new_tensor(stds).unsqueeze(0)\n    deltas = deltas.sub_(means).div_(stds)\n\n    return deltas\n\n\ndef delta2bbox(rois,\n               deltas,\n               means=(0., 0., 0., 0.),\n               stds=(1., 1., 1., 1.),\n               max_shape=None,\n               wh_ratio_clip=16 / 1000,\n               clip_border=True,\n               add_ctr_clamp=False,\n               ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4).\n        deltas (Tensor): Encoded offsets relative to each roi.\n            Has shape (N, num_classes * 4) or (N, 4). Note\n            N = num_base_anchors * W * H, when rois is a grid of\n            anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies\n           (H, W). Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default\n            16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp. When set to True,\n            the center of the prediction bounding box will be clamped to\n            avoid being too far away from the center of the anchor.\n            Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4\n           represent tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. 
[1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4\n    if num_bboxes == 0:\n        return deltas\n\n    deltas = deltas.reshape(-1, 4)\n\n    means = deltas.new_tensor(means).view(1, -1)\n    stds = deltas.new_tensor(stds).view(1, -1)\n    denorm_deltas = deltas * stds + means\n\n    dxy = denorm_deltas[:, :2]\n    dwh = denorm_deltas[:, 2:]\n\n    # Compute width/height of each roi\n    rois_ = rois.repeat(1, num_classes).reshape(-1, 4)\n    pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)\n    pwh = (rois_[:, 2:] - rois_[:, :2])\n\n    dxy_wh = pwh * dxy\n\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    if add_ctr_clamp:\n        dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)\n        dwh = torch.clamp(dwh, max=max_ratio)\n    else:\n        dwh = dwh.clamp(min=-max_ratio, max=max_ratio)\n\n    gxy = pxy + dxy_wh\n    gwh = pwh * dwh.exp()\n    x1y1 = gxy - (gwh * 0.5)\n    x2y2 = gxy + (gwh * 0.5)\n    bboxes = torch.cat([x1y1, x2y2], dim=-1)\n    if clip_border and max_shape is not None:\n        bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])\n        bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])\n    bboxes = bboxes.reshape(num_bboxes, -1)\n    return bboxes\n\n\ndef onnx_delta2bbox(rois,\n                    deltas,\n                    means=(0., 0., 0., 0.),\n                    stds=(1., 1., 1., 1.),\n                    max_shape=None,\n                    wh_ratio_clip=16 / 1000,\n                    clip_border=True,\n                    add_ctr_clamp=False,\n                    ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)\n        deltas (Tensor): Encoded offsets with respect to each roi.\n            Has shape (B, N, num_classes * 4) or (B, N, 4) or\n            (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n            when rois is a grid of anchors.Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]],optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). 
If rois shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B. Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes.\n            Default 16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp, when added, the\n            predicted box is clamped is its center is too far away from\n            the original anchor's center. Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or\n           (N, num_classes * 4) or (N, 4), where 4 represent\n           tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. [1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    means = deltas.new_tensor(means).view(1,\n                                          -1).repeat(1,\n                                                     deltas.size(-1) // 4)\n    stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)\n    denorm_deltas = deltas * stds + means\n    dx = denorm_deltas[..., 0::4]\n    dy = denorm_deltas[..., 1::4]\n    dw = denorm_deltas[..., 2::4]\n    dh = denorm_deltas[..., 3::4]\n\n    x1, y1 = rois[..., 0], rois[..., 1]\n    x2, y2 = rois[..., 2], rois[..., 3]\n    # Compute center of each roi\n    px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)\n    py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)\n    # Compute width/height of each roi\n    pw = (x2 - x1).unsqueeze(-1).expand_as(dw)\n    ph = (y2 - y1).unsqueeze(-1).expand_as(dh)\n\n    dx_width = pw * dx\n    dy_height = ph * dy\n\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    if add_ctr_clamp:\n        dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)\n        dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)\n        dw = torch.clamp(dw, max=max_ratio)\n        dh = torch.clamp(dh, max=max_ratio)\n    else:\n        dw = dw.clamp(min=-max_ratio, max=max_ratio)\n        dh = dh.clamp(min=-max_ratio, max=max_ratio)\n    # Use exp(network energy) to enlarge/shrink each roi\n    gw = pw * dw.exp()\n    gh = ph * dh.exp()\n    # Use network energy to shift the center of each roi\n    gx = px + dx_width\n    gy = py + dy_height\n    # Convert center-xy/width/height to top-left, bottom-right\n    x1 = gx - gw * 0.5\n    y1 = gy - gh * 0.5\n    x2 = gx + gw * 0.5\n    y2 = gy + gh * 0.5\n\n    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n\n    if clip_border and max_shape is not None:\n        # clip bboxes 
with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n            bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = x1.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(x1)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = x1.new_tensor(0)\n        max_xy = torch.cat(\n            [max_shape] * (deltas.size(-1) // 2),\n            dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n"
  },
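Since `delta2bbox` is documented as the inverse of `bbox2delta`, a quick round trip is an easy sanity check on the encoding. A minimal sketch, assuming `torch` and this mmdet module are importable; the proposal and ground-truth values are made up:

```python
import torch
from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import (
    bbox2delta, delta2bbox)

proposals = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 20.]])
gts = torch.tensor([[1., 1., 11., 12.], [4., 6., 16., 18.]])

deltas = bbox2delta(proposals, gts)      # (dx, dy, dw, dh) per proposal
decoded = delta2bbox(proposals, deltas)  # apply the deltas back onto the proposals
assert torch.allclose(decoded, gts, atol=1e-4)
```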
  {
    "path": "mmdet/models/task_modules/coders/distance_point_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import (HorizontalBoxes, bbox2distance,\n                                   distance2bbox, get_box_tensor)\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass DistancePointBBoxCoder(BaseBBoxCoder):\n    \"\"\"Distance Point BBox coder.\n\n    This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n    right) and decode it back to the original.\n\n    Args:\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self, clip_border=True, **kwargs):\n        super().__init__(**kwargs)\n        self.clip_border = clip_border\n\n    def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):\n        \"\"\"Encode bounding box to distances.\n\n        Args:\n            points (Tensor): Shape (N, 2), The format is [x, y].\n            gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format\n                is \"xyxy\"\n            max_dis (float): Upper bound of the distance. Default None.\n            eps (float): a small value to ensure target < max_dis, instead <=.\n                Default 0.1.\n\n        Returns:\n            Tensor: Box transformation deltas. The shape is (N, 4).\n        \"\"\"\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert points.size(0) == gt_bboxes.size(0)\n        assert points.size(-1) == 2\n        assert gt_bboxes.size(-1) == 4\n        return bbox2distance(points, gt_bboxes, max_dis, eps)\n\n    def decode(self, points, pred_bboxes, max_shape=None):\n        \"\"\"Decode distance prediction to bounding box.\n\n        Args:\n            points (Tensor): Shape (B, N, 2) or (N, 2).\n            pred_bboxes (Tensor): Distance from the given point to 4\n                boundaries (left, top, right, bottom). Shape (B, N, 4)\n                or (N, 4)\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n                Sequence[int]],optional): Maximum bounds for boxes, specifies\n                (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n                the max_shape should be a Sequence[Sequence[int]],\n                and the length of max_shape should also be B.\n                Default None.\n        Returns:\n            Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or\n            (B, N, 4)\n        \"\"\"\n        assert points.size(0) == pred_bboxes.size(0)\n        assert points.size(-1) == 2\n        assert pred_bboxes.size(-1) == 4\n        if self.clip_border is False:\n            max_shape = None\n        bboxes = distance2bbox(points, pred_bboxes, max_shape)\n\n        if self.use_box_type:\n            bboxes = HorizontalBoxes(bboxes)\n        return bboxes\n"
  },
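`DistancePointBBoxCoder` delegates to `bbox2distance` and `distance2bbox` from `mmdet.structures.bbox`, which are exact inverses as long as no clamping (`max_dis`) or clipping (`max_shape`) is applied. A small round-trip sketch; the point and box values are made up, with each point chosen to lie inside its box:

```python
import torch
from mmdet.structures.bbox import bbox2distance, distance2bbox

points = torch.tensor([[5., 5.], [10., 12.]])               # (N, 2) prior points
gts = torch.tensor([[2., 3., 9., 8.], [6., 6., 14., 18.]])  # (N, 4) xyxy targets

dists = bbox2distance(points, gts)    # per-point distances to the 4 box edges
boxes = distance2bbox(points, dists)  # invert back to xyxy
assert torch.allclose(boxes, gts, atol=1e-4)
```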
  {
    "path": "mmdet/models/task_modules/coders/legacy_delta_xywh_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):\n    \"\"\"Legacy Delta XYWH BBox coder used in MMDet V1.x.\n\n    Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,\n    y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)\n    back to original bbox (x1, y1, x2, y2).\n\n    Note:\n        The main difference between :class`LegacyDeltaXYWHBBoxCoder` and\n        :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and\n        height calculation. We suggest to only use this coder when testing with\n        MMDet V1.x models.\n\n    References:\n        .. [1] https://arxiv.org/abs/1311.2524\n\n    Args:\n        target_means (Sequence[float]): denormalizing means of target for\n            delta coordinates\n        target_stds (Sequence[float]): denormalizing standard deviation of\n            target for delta coordinates\n    \"\"\"\n\n    def __init__(self,\n                 target_means=(0., 0., 0., 0.),\n                 target_stds=(1., 1., 1., 1.),\n                 **kwargs):\n        super().__init__(**kwargs)\n        self.means = target_means\n        self.stds = target_stds\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,\n                e.g., object proposals.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the\n                transformation, e.g., ground-truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,\n                                           self.stds)\n        return encoded_bboxes\n\n    def decode(self,\n               bboxes,\n               pred_bboxes,\n               max_shape=None,\n               wh_ratio_clip=16 / 1000):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n            max_shape (tuple[int], optional): Maximum shape of boxes.\n                Defaults to None.\n            wh_ratio_clip (float, optional): The allowed ratio between\n                width and height.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        assert pred_bboxes.size(0) == bboxes.size(0)\n        decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,\n                                           self.stds, max_shape, wh_ratio_clip)\n\n        if self.use_box_type:\n            assert decoded_bboxes.size(-1) == 4, \\\n                ('Cannot warp decoded boxes with box type when decoded boxes'\n                 'have shape of (N, num_classes * 4)')\n            decoded_bboxes = HorizontalBoxes(decoded_bboxes)\n        
return decoded_bboxes\n\n\ndef legacy_bbox2delta(proposals,\n                      gt,\n                      means=(0., 0., 0., 0.),\n                      stds=(1., 1., 1., 1.)):\n    \"\"\"Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.\n\n    We usually compute the deltas of x, y, w, h of proposals w.r.t ground\n    truth bboxes to get regression target.\n    This is the inverse function of `delta2bbox()`\n\n    Args:\n        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n        gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n\n    Returns:\n        Tensor: deltas with shape (N, 4), where columns represent dx, dy,\n            dw, dh.\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    proposals = proposals.float()\n    gt = gt.float()\n    px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n    py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n    pw = proposals[..., 2] - proposals[..., 0] + 1.0\n    ph = proposals[..., 3] - proposals[..., 1] + 1.0\n\n    gx = (gt[..., 0] + gt[..., 2]) * 0.5\n    gy = (gt[..., 1] + gt[..., 3]) * 0.5\n    gw = gt[..., 2] - gt[..., 0] + 1.0\n    gh = gt[..., 3] - gt[..., 1] + 1.0\n\n    dx = (gx - px) / pw\n    dy = (gy - py) / ph\n    dw = torch.log(gw / pw)\n    dh = torch.log(gh / ph)\n    deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n    means = deltas.new_tensor(means).unsqueeze(0)\n    stds = deltas.new_tensor(stds).unsqueeze(0)\n    deltas = deltas.sub_(means).div_(stds)\n\n    return deltas\n\n\ndef legacy_delta2bbox(rois,\n                      deltas,\n                      means=(0., 0., 0., 0.),\n                      stds=(1., 1., 1., 1.),\n                      max_shape=None,\n                      wh_ratio_clip=16 / 1000):\n    \"\"\"Apply deltas to shift/scale base boxes in the MMDet V1.x manner.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of `bbox2delta()`\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4)\n        deltas (Tensor): Encoded offsets with respect to each roi.\n            Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when\n            rois is a grid of anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n        max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)\n        wh_ratio_clip (float): Maximum aspect ratio for boxes.\n\n    Returns:\n        Tensor: Boxes with shape (N, 4), where columns represent\n            tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. 
[1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))\n        tensor([[0.0000, 0.0000, 1.5000, 1.5000],\n                [0.0000, 0.0000, 5.2183, 5.2183],\n                [0.0000, 0.1321, 7.8891, 0.8679],\n                [5.3967, 2.4251, 6.0033, 3.7749]])\n    \"\"\"\n    means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)\n    stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)\n    denorm_deltas = deltas * stds + means\n    dx = denorm_deltas[:, 0::4]\n    dy = denorm_deltas[:, 1::4]\n    dw = denorm_deltas[:, 2::4]\n    dh = denorm_deltas[:, 3::4]\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    dw = dw.clamp(min=-max_ratio, max=max_ratio)\n    dh = dh.clamp(min=-max_ratio, max=max_ratio)\n    # Compute center of each roi\n    px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n    py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n    # Compute width/height of each roi\n    pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)\n    ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)\n    # Use exp(network energy) to enlarge/shrink each roi\n    gw = pw * dw.exp()\n    gh = ph * dh.exp()\n    # Use network energy to shift the center of each roi\n    gx = px + pw * dx\n    gy = py + ph * dy\n    # Convert center-xy/width/height to top-left, bottom-right\n\n    # The true legacy box coder should +- 0.5 here.\n    # However, current implementation improves the performance when testing\n    # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)\n    x1 = gx - gw * 0.5\n    y1 = gy - gh * 0.5\n    x2 = gx + gw * 0.5\n    y2 = gy + gh * 0.5\n    if max_shape is not None:\n        x1 = x1.clamp(min=0, max=max_shape[1] - 1)\n        y1 = y1.clamp(min=0, max=max_shape[0] - 1)\n        x2 = x2.clamp(min=0, max=max_shape[1] - 1)\n        y2 = y2.clamp(min=0, max=max_shape[0] - 1)\n    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)\n    return bboxes\n"
  },
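The only functional difference from the standard coder is the legacy `+ 1.0` in the width and height terms, so the two encoders disagree slightly on the same inputs. A comparison sketch with made-up box values:

```python
import torch
from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import bbox2delta
from mmdet.models.task_modules.coders.legacy_delta_xywh_bbox_coder import (
    legacy_bbox2delta)

proposals = torch.tensor([[0., 0., 10., 10.]])
gts = torch.tensor([[1., 1., 13., 13.]])

print(bbox2delta(proposals, gts))         # widths/heights computed as x2 - x1
print(legacy_bbox2delta(proposals, gts))  # widths/heights computed as x2 - x1 + 1
```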
  {
    "path": "mmdet/models/task_modules/coders/pseudo_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass PseudoBBoxCoder(BaseBBoxCoder):\n    \"\"\"Pseudo bounding box coder.\"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"torch.Tensor: return the given ``bboxes``\"\"\"\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        return gt_bboxes\n\n    def decode(self, bboxes, pred_bboxes):\n        \"\"\"torch.Tensor: return the given ``pred_bboxes``\"\"\"\n        if self.use_box_type:\n            pred_bboxes = HorizontalBoxes(pred_bboxes)\n        return pred_bboxes\n"
  },
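All of these coders register themselves in `TASK_UTILS`, so detection heads normally receive them as config dicts and build them through the registry rather than importing the classes directly. A hedged sketch of that pattern (the std values are illustrative, and it assumes mmdet's modules have been registered, e.g. via `register_all_modules`):

```python
from mmdet.registry import TASK_UTILS
from mmdet.utils import register_all_modules

register_all_modules()  # ensure the @TASK_UTILS.register_module() classes are imported

coder_cfg = dict(
    type='DeltaXYWHBBoxCoder',
    target_means=(0., 0., 0., 0.),
    target_stds=(0.1, 0.1, 0.2, 0.2))
bbox_coder = TASK_UTILS.build(coder_cfg)
```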
  {
    "path": "mmdet/models/task_modules/coders/tblr_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass TBLRBBoxCoder(BaseBBoxCoder):\n    \"\"\"TBLR BBox coder.\n\n    Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,\n    this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n    right) and decode it back to the original.\n\n    Args:\n        normalizer (list | float): Normalization factor to be\n          divided with when coding the coordinates. If it is a list, it should\n          have length of 4 indicating normalization factor in tblr dims.\n          Otherwise it is a unified float factor for all dims. Default: 4.0\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self, normalizer=4.0, clip_border=True, **kwargs):\n        super().__init__(**kwargs)\n        self.normalizer = normalizer\n        self.clip_border = clip_border\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left,\n        bottom, right) order.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,\n                e.g., object proposals.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the\n                transformation, e.g., ground truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bboxes2tblr(\n            bboxes, gt_bboxes, normalizer=self.normalizer)\n        return encoded_bboxes\n\n    def decode(self, bboxes, pred_bboxes, max_shape=None):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.Shape\n                (B, N, 4) or (N, 4)\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n               (B, N, 4) or (N, 4)\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n               Sequence[int]],optional): Maximum bounds for boxes, specifies\n               (H, W, C) or (H, W). 
If bboxes shape is (B, N, 4), then\n               the max_shape should be a Sequence[Sequence[int]]\n               and the length of max_shape should also be B.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        decoded_bboxes = tblr2bboxes(\n            bboxes,\n            pred_bboxes,\n            normalizer=self.normalizer,\n            max_shape=max_shape,\n            clip_border=self.clip_border)\n\n        if self.use_box_type:\n            decoded_bboxes = HorizontalBoxes(decoded_bboxes)\n        return decoded_bboxes\n\n\ndef bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):\n    \"\"\"Encode ground truth boxes to tblr coordinate.\n\n    It first convert the gt coordinate to tblr format,\n     (top, bottom, left, right), relative to prior box centers.\n     The tblr coordinate may be normalized by the side length of prior bboxes\n     if `normalize_by_wh` is specified as True, and it is then normalized by\n     the `normalizer` factor.\n\n    Args:\n        priors (Tensor): Prior boxes in point form\n            Shape: (num_proposals,4).\n        gts (Tensor): Coords of ground truth for each prior in point-form\n            Shape: (num_proposals, 4).\n        normalizer (Sequence[float] | float): normalization parameter of\n            encoded boxes. If it is a list, it has to have length = 4.\n            Default: 4.0\n        normalize_by_wh (bool): Whether to normalize tblr coordinate by the\n            side length (wh) of prior bboxes.\n\n    Return:\n        encoded boxes (Tensor), Shape: (num_proposals, 4)\n    \"\"\"\n\n    # dist b/t match center and prior's center\n    if not isinstance(normalizer, float):\n        normalizer = torch.tensor(normalizer, device=priors.device)\n        assert len(normalizer) == 4, 'Normalizer must have length = 4'\n    assert priors.size(0) == gts.size(0)\n    prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2\n    xmin, ymin, xmax, ymax = gts.split(1, dim=1)\n    top = prior_centers[:, 1].unsqueeze(1) - ymin\n    bottom = ymax - prior_centers[:, 1].unsqueeze(1)\n    left = prior_centers[:, 0].unsqueeze(1) - xmin\n    right = xmax - prior_centers[:, 0].unsqueeze(1)\n    loc = torch.cat((top, bottom, left, right), dim=1)\n    if normalize_by_wh:\n        # Normalize tblr by anchor width and height\n        wh = priors[:, 2:4] - priors[:, 0:2]\n        w, h = torch.split(wh, 1, dim=1)\n        loc[:, :2] /= h  # tb is normalized by h\n        loc[:, 2:] /= w  # lr is normalized by w\n    # Normalize tblr by the given normalization factor\n    return loc / normalizer\n\n\ndef tblr2bboxes(priors,\n                tblr,\n                normalizer=4.0,\n                normalize_by_wh=True,\n                max_shape=None,\n                clip_border=True):\n    \"\"\"Decode tblr outputs to prediction boxes.\n\n    The process includes 3 steps: 1) De-normalize tblr coordinates by\n    multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the\n    prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert\n    tblr (top, bottom, left, right) pair relative to the center of priors back\n    to (xmin, ymin, xmax, ymax) coordinate.\n\n    Args:\n        priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)\n          Shape: (N,4) or (B, N, 4).\n        tblr (Tensor): Coords of network output in tblr form\n          Shape: (N, 4) or (B, N, 4).\n        normalizer (Sequence[float] | float): 
Normalization parameter of\n          encoded boxes. By list, it represents the normalization factors at\n          tblr dims. By float, it is the unified normalization factor at all\n          dims. Default: 4.0\n        normalize_by_wh (bool): Whether the tblr coordinates have been\n          normalized by the side length (wh) of prior bboxes.\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]],optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n\n    Return:\n        encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)\n    \"\"\"\n    if not isinstance(normalizer, float):\n        normalizer = torch.tensor(normalizer, device=priors.device)\n        assert len(normalizer) == 4, 'Normalizer must have length = 4'\n    assert priors.size(0) == tblr.size(0)\n    if priors.ndim == 3:\n        assert priors.size(1) == tblr.size(1)\n\n    loc_decode = tblr * normalizer\n    prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2\n    if normalize_by_wh:\n        wh = priors[..., 2:4] - priors[..., 0:2]\n        w, h = torch.split(wh, 1, dim=-1)\n        # Inplace operation with slice would failed for exporting to ONNX\n        th = h * loc_decode[..., :2]  # tb\n        tw = w * loc_decode[..., 2:]  # lr\n        loc_decode = torch.cat([th, tw], dim=-1)\n    # Cannot be exported using onnx when loc_decode.split(1, dim=-1)\n    top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)\n    xmin = prior_centers[..., 0].unsqueeze(-1) - left\n    xmax = prior_centers[..., 0].unsqueeze(-1) + right\n    ymin = prior_centers[..., 1].unsqueeze(-1) - top\n    ymax = prior_centers[..., 1].unsqueeze(-1) + bottom\n\n    bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)\n\n    if clip_border and max_shape is not None:\n        # clip bboxes with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            xmin, ymin, xmax, ymax = dynamic_clip_for_onnx(\n                xmin, ymin, xmax, ymax, max_shape)\n            bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = priors.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(priors)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = priors.new_tensor(0)\n        max_xy = torch.cat([max_shape, max_shape],\n                           dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n"
  },
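As with the delta coder, `tblr2bboxes` undoes `bboxes2tblr` exactly when no clipping is requested, which makes a round trip a convenient sanity check. A minimal sketch; the prior and ground-truth values are made up:

```python
import torch
from mmdet.models.task_modules.coders.tblr_bbox_coder import (
    bboxes2tblr, tblr2bboxes)

priors = torch.tensor([[0., 0., 8., 8.], [4., 4., 12., 16.]])
gts = torch.tensor([[1., 1., 7., 9.], [3., 5., 13., 15.]])

tblr = bboxes2tblr(priors, gts)      # (top, bottom, left, right), normalized
decoded = tblr2bboxes(priors, tblr)  # back to (xmin, ymin, xmax, ymax)
assert torch.allclose(decoded, gts, atol=1e-4)
```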
  {
    "path": "mmdet/models/task_modules/coders/yolo_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@TASK_UTILS.register_module()\nclass YOLOBBoxCoder(BaseBBoxCoder):\n    \"\"\"YOLO BBox coder.\n\n    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide\n    image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).\n    cx, cy in [0., 1.], denotes relative center position w.r.t the center of\n    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.\n\n    Args:\n        eps (float): Min value of cx, cy when encoding.\n    \"\"\"\n\n    def __init__(self, eps=1e-6, **kwargs):\n        super().__init__(**kwargs)\n        self.eps = eps\n\n    def encode(self, bboxes, gt_bboxes, stride):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,\n                e.g., anchors.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the\n                transformation, e.g., ground-truth boxes.\n            stride (torch.Tensor | int): Stride of bboxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5\n        y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5\n        w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]\n        h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]\n        x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5\n        y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5\n        w = bboxes[..., 2] - bboxes[..., 0]\n        h = bboxes[..., 3] - bboxes[..., 1]\n        w_target = torch.log((w_gt / w).clamp(min=self.eps))\n        h_target = torch.log((h_gt / h).clamp(min=self.eps))\n        x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(\n            self.eps, 1 - self.eps)\n        y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(\n            self.eps, 1 - self.eps)\n        encoded_bboxes = torch.stack(\n            [x_center_target, y_center_target, w_target, h_target], dim=-1)\n        return encoded_bboxes\n\n    def decode(self, bboxes, pred_bboxes, stride):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,\n                e.g. 
anchors.\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n                (N, 4) or (B, N, 4).\n            stride (torch.Tensor | int): Strides of bboxes.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        assert pred_bboxes.size(-1) == bboxes.size(-1) == 4\n        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (\n            pred_bboxes[..., :2] - 0.5) * stride\n        whs = (bboxes[..., 2:] -\n               bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()\n        decoded_bboxes = torch.stack(\n            (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -\n             whs[..., 1], xy_centers[..., 0] + whs[..., 0],\n             xy_centers[..., 1] + whs[..., 1]),\n            dim=-1)\n\n        if self.use_box_type:\n            decoded_bboxes = HorizontalBoxes(decoded_bboxes)\n        return decoded_bboxes\n"
  },
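`YOLOBBoxCoder.encode` and `decode` are likewise mutual inverses as long as the encoded center offsets stay strictly inside `(eps, 1 - eps)`, i.e. the ground-truth center lies within half a stride of the anchor center. A small round-trip sketch; the anchor, box, and stride values are made up, and it assumes the default plain-tensor output (`use_box_type=False`):

```python
import torch
from mmdet.models.task_modules.coders.yolo_bbox_coder import YOLOBBoxCoder

coder = YOLOBBoxCoder()
anchors = torch.tensor([[0., 0., 8., 8.]])  # one anchor, center (4, 4)
gts = torch.tensor([[2., 1., 8., 9.]])      # target box, center (5, 5)
stride = 8

encoded = coder.encode(anchors, gts, stride)      # (cx, cy, dw, dh) per anchor
decoded = coder.decode(anchors, encoded, stride)  # back to (x1, y1, x2, y2)
assert torch.allclose(decoded, gts, atol=1e-4)
```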
  {
    "path": "mmdet/models/task_modules/prior_generators/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,\n                               SSDAnchorGenerator, YOLOAnchorGenerator)\nfrom .point_generator import MlvlPointGenerator, PointGenerator\nfrom .utils import anchor_inside_flags, calc_region\n\n__all__ = [\n    'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',\n    'PointGenerator', 'calc_region', 'YOLOAnchorGenerator',\n    'MlvlPointGenerator', 'SSDAnchorGenerator'\n]\n"
  },
  {
    "path": "mmdet/models/task_modules/prior_generators/anchor_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom mmengine.utils import is_tuple_of\nfrom torch import Tensor\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes\n\nDeviceType = Union[str, torch.device]\n\n\n@TASK_UTILS.register_module()\nclass AnchorGenerator:\n    \"\"\"Standard anchor generator for 2D anchor-based detectors.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels in order (w, h).\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        scales (list[int], Optional): Anchor scales for anchors\n            in a single level. It cannot be set at the same time\n            if `octave_base_scale` and `scales_per_octave` are set.\n        base_sizes (list[int], Optional): The basic sizes\n            of anchors in multiple levels.\n            If None is given, strides will be used as base_sizes.\n            (If strides are non square, the shortest stride is taken.)\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. By default it is True in V2.0\n        octave_base_scale (int, Optional): The base scale of octave.\n        scales_per_octave (int, Optional): Number of scales for each octave.\n            `octave_base_scale` and `scales_per_octave` are usually used in\n            retinanet and the `scales` should be None when they are set.\n        centers (list[tuple[float]], Optional): The centers of the anchor\n            relative to the feature grid center in multiple feature levels.\n            By default it is set to be None and not used. If a list of tuple of\n            float is given, they will be used to shift the centers of anchors.\n        center_offset (float): The offset of center in proportion to anchors'\n            width and height. By default it is 0 in V2.0.\n        use_box_type (bool): Whether to warp anchors with the box type data\n            structure. Defaults to False.\n\n    Examples:\n        >>> from mmdet.models.task_modules.\n        ... 
prior_generators import AnchorGenerator\n        >>> self = AnchorGenerator([16], [1.], [1.], [9])\n        >>> all_anchors = self.grid_priors([(2, 2)], device='cpu')\n        >>> print(all_anchors)\n        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],\n                [11.5000, -4.5000, 20.5000,  4.5000],\n                [-4.5000, 11.5000,  4.5000, 20.5000],\n                [11.5000, 11.5000, 20.5000, 20.5000]])]\n        >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])\n        >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')\n        >>> print(all_anchors)\n        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],\n                [11.5000, -4.5000, 20.5000,  4.5000],\n                [-4.5000, 11.5000,  4.5000, 20.5000],\n                [11.5000, 11.5000, 20.5000, 20.5000]]), \\\n        tensor([[-9., -9., 9., 9.]])]\n    \"\"\"\n\n    def __init__(self,\n                 strides: Union[List[int], List[Tuple[int, int]]],\n                 ratios: List[float],\n                 scales: Optional[List[int]] = None,\n                 base_sizes: Optional[List[int]] = None,\n                 scale_major: bool = True,\n                 octave_base_scale: Optional[int] = None,\n                 scales_per_octave: Optional[int] = None,\n                 centers: Optional[List[Tuple[float, float]]] = None,\n                 center_offset: float = 0.,\n                 use_box_type: bool = False) -> None:\n        # check center and center_offset\n        if center_offset != 0:\n            assert centers is None, 'center cannot be set when center_offset' \\\n                                    f'!=0, {centers} is given.'\n        if not (0 <= center_offset <= 1):\n            raise ValueError('center_offset should be in range [0, 1], '\n                             f'{center_offset} is given.')\n        if centers is not None:\n            assert len(centers) == len(strides), \\\n                'The number of strides should be the same as centers, got ' \\\n                f'{strides} and {centers}'\n\n        # calculate base sizes of anchors\n        self.strides = [_pair(stride) for stride in strides]\n        self.base_sizes = [min(stride) for stride in self.strides\n                           ] if base_sizes is None else base_sizes\n        assert len(self.base_sizes) == len(self.strides), \\\n            'The number of strides should be the same as base sizes, got ' \\\n            f'{self.strides} and {self.base_sizes}'\n\n        # calculate scales of anchors\n        assert ((octave_base_scale is not None\n                 and scales_per_octave is not None) ^ (scales is not None)), \\\n            'scales and octave_base_scale with scales_per_octave cannot' \\\n            ' be set at the same time'\n        if scales is not None:\n            self.scales = torch.Tensor(scales)\n        elif octave_base_scale is not None and scales_per_octave is not None:\n            octave_scales = np.array(\n                [2**(i / scales_per_octave) for i in range(scales_per_octave)])\n            scales = octave_scales * octave_base_scale\n            self.scales = torch.Tensor(scales)\n        else:\n            raise ValueError('Either scales or octave_base_scale with '\n                             'scales_per_octave should be set')\n\n        self.octave_base_scale = octave_base_scale\n        self.scales_per_octave = scales_per_octave\n        self.ratios = torch.Tensor(ratios)\n        self.scale_major = scale_major\n        self.centers = centers\n        
self.center_offset = center_offset\n        self.base_anchors = self.gen_base_anchors()\n        self.use_box_type = use_box_type\n\n    @property\n    def num_base_anchors(self) -> List[int]:\n        \"\"\"list[int]: total number of base anchors in a feature grid\"\"\"\n        return self.num_base_priors\n\n    @property\n    def num_base_priors(self) -> List[int]:\n        \"\"\"list[int]: The number of priors (anchors) at a point\n        on the feature grid\"\"\"\n        return [base_anchors.size(0) for base_anchors in self.base_anchors]\n\n    @property\n    def num_levels(self) -> int:\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.strides)\n\n    def gen_base_anchors(self) -> List[Tensor]:\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_size in enumerate(self.base_sizes):\n            center = None\n            if self.centers is not None:\n                center = self.centers[i]\n            multi_level_base_anchors.append(\n                self.gen_single_level_base_anchors(\n                    base_size,\n                    scales=self.scales,\n                    ratios=self.ratios,\n                    center=center))\n        return multi_level_base_anchors\n\n    def gen_single_level_base_anchors(self,\n                                      base_size: Union[int, float],\n                                      scales: Tensor,\n                                      ratios: Tensor,\n                                      center: Optional[Tuple[float]] = None) \\\n            -> Tensor:\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. 
Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature maps.\n        \"\"\"\n        w = base_size\n        h = base_size\n        if center is None:\n            x_center = self.center_offset * w\n            y_center = self.center_offset * h\n        else:\n            x_center, y_center = center\n\n        h_ratios = torch.sqrt(ratios)\n        w_ratios = 1 / h_ratios\n        if self.scale_major:\n            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)\n            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)\n        else:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n\n        # use float anchor and the anchor's center is aligned with the\n        # pixel center\n        base_anchors = [\n            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,\n            y_center + 0.5 * hs\n        ]\n        base_anchors = torch.stack(base_anchors, dim=-1)\n\n        return base_anchors\n\n    def _meshgrid(self,\n                  x: Tensor,\n                  y: Tensor,\n                  row_major: bool = True) -> Tuple[Tensor]:\n        \"\"\"Generate mesh grid of x and y.\n\n        Args:\n            x (torch.Tensor): Grids of x dimension.\n            y (torch.Tensor): Grids of y dimension.\n            row_major (bool): Whether to return y grids first.\n                Defaults to True.\n\n        Returns:\n            tuple[torch.Tensor]: The mesh grids of x and y.\n        \"\"\"\n        # use shape instead of len to keep tracing while exporting to onnx\n        xx = x.repeat(y.shape[0])\n        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_priors(self,\n                    featmap_sizes: List[Tuple],\n                    dtype: torch.dtype = torch.float32,\n                    device: DeviceType = 'cuda') -> List[Tensor]:\n        \"\"\"Generate grid anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels.\n            dtype (:obj:`torch.dtype`): Dtype of priors.\n                Defaults to torch.float32.\n            device (str | torch.device): The device where the anchors\n                will be put on.\n\n        Return:\n            list[torch.Tensor]: Anchors in multiple feature levels. 
\\\n                The sizes of each tensor should be [N, 4], where \\\n                N = width * height * num_base_anchors, width and height \\\n                are the sizes of the corresponding feature level, \\\n                num_base_anchors is the number of anchors for that level.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_anchors = []\n        for i in range(self.num_levels):\n            anchors = self.single_level_grid_priors(\n                featmap_sizes[i], level_idx=i, dtype=dtype, device=device)\n            multi_level_anchors.append(anchors)\n        return multi_level_anchors\n\n    def single_level_grid_priors(self,\n                                 featmap_size: Tuple[int, int],\n                                 level_idx: int,\n                                 dtype: torch.dtype = torch.float32,\n                                 device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate grid anchors of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_priors``.\n\n        Args:\n            featmap_size (tuple[int, int]): Size of the feature maps.\n            level_idx (int): The index of corresponding feature map level.\n            dtype (obj:`torch.dtype`): Date type of points.Defaults to\n                ``torch.float32``.\n            device (str | torch.device): The device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: Anchors in the overall feature maps.\n        \"\"\"\n\n        base_anchors = self.base_anchors[level_idx].to(device).to(dtype)\n        feat_h, feat_w = featmap_size\n        stride_w, stride_h = self.strides[level_idx]\n        # First create Range with the default dtype, than convert to\n        # target `dtype` for onnx exporting.\n        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w\n        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\n        # first feat_w elements correspond to the first row of shifts\n        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n        # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n        all_anchors = all_anchors.view(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        if self.use_box_type:\n            all_anchors = HorizontalBoxes(all_anchors)\n        return all_anchors\n\n    def sparse_priors(self,\n                      prior_idxs: Tensor,\n                      featmap_size: Tuple[int, int],\n                      level_idx: int,\n                      dtype: torch.dtype = torch.float32,\n                      device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate sparse anchors according to the ``prior_idxs``.\n\n        Args:\n            prior_idxs (Tensor): The index of corresponding anchors\n                in the feature map.\n            featmap_size (tuple[int, int]): feature map size arrange as (h, w).\n            level_idx (int): The level index of corresponding feature\n                map.\n            dtype (obj:`torch.dtype`): Date type of points.Defaults to\n                ``torch.float32``.\n            device (str | torch.device): The device where the points 
is\n                located.\n        Returns:\n            Tensor: Anchor with shape (N, 4), N should be equal to\n                the length of ``prior_idxs``.\n        \"\"\"\n\n        height, width = featmap_size\n        num_base_anchors = self.num_base_anchors[level_idx]\n        base_anchor_id = prior_idxs % num_base_anchors\n        x = (prior_idxs //\n             num_base_anchors) % width * self.strides[level_idx][0]\n        y = (prior_idxs // width //\n             num_base_anchors) % height * self.strides[level_idx][1]\n        priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \\\n            self.base_anchors[level_idx][base_anchor_id, :].to(device)\n\n        return priors\n\n    def grid_anchors(self,\n                     featmap_sizes: List[Tuple],\n                     device: DeviceType = 'cuda') -> List[Tensor]:\n        \"\"\"Generate grid anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels.\n            device (str | torch.device): Device where the anchors will be\n                put on.\n\n        Return:\n            list[torch.Tensor]: Anchors in multiple feature levels. \\\n                The sizes of each tensor should be [N, 4], where \\\n                N = width * height * num_base_anchors, width and height \\\n                are the sizes of the corresponding feature level, \\\n                num_base_anchors is the number of anchors for that level.\n        \"\"\"\n        warnings.warn('``grid_anchors`` would be deprecated soon. '\n                      'Please use ``grid_priors`` ')\n\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_anchors = []\n        for i in range(self.num_levels):\n            anchors = self.single_level_grid_anchors(\n                self.base_anchors[i].to(device),\n                featmap_sizes[i],\n                self.strides[i],\n                device=device)\n            multi_level_anchors.append(anchors)\n        return multi_level_anchors\n\n    def single_level_grid_anchors(self,\n                                  base_anchors: Tensor,\n                                  featmap_size: Tuple[int, int],\n                                  stride: Tuple[int, int] = (16, 16),\n                                  device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate grid anchors of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_anchors``.\n\n        Args:\n            base_anchors (torch.Tensor): The base anchors of a feature grid.\n            featmap_size (tuple[int]): Size of the feature maps.\n            stride (tuple[int, int]): Stride of the feature map in order\n                (w, h). Defaults to (16, 16).\n            device (str | torch.device): Device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: Anchors in the overall feature maps.\n        \"\"\"\n\n        warnings.warn(\n            '``single_level_grid_anchors`` would be deprecated soon. 
'\n            'Please use ``single_level_grid_priors`` ')\n\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        feat_h, feat_w = featmap_size\n        shift_x = torch.arange(0, feat_w, device=device) * stride[0]\n        shift_y = torch.arange(0, feat_h, device=device) * stride[1]\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\n        shifts = shifts.type_as(base_anchors)\n        # first feat_w elements correspond to the first row of shifts\n        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n        # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n        all_anchors = all_anchors.view(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        return all_anchors\n\n    def valid_flags(self,\n                    featmap_sizes: List[Tuple[int, int]],\n                    pad_shape: Tuple,\n                    device: DeviceType = 'cuda') -> List[Tensor]:\n        \"\"\"Generate valid flags of anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list(tuple[int, int])): List of feature map sizes in\n                multiple feature levels.\n            pad_shape (tuple): The padded shape of the image.\n            device (str | torch.device): Device where the anchors will be\n                put on.\n\n        Return:\n            list(torch.Tensor): Valid flags of anchors in multiple levels.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_flags = []\n        for i in range(self.num_levels):\n            anchor_stride = self.strides[i]\n            feat_h, feat_w = featmap_sizes[i]\n            h, w = pad_shape[:2]\n            valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)\n            valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)\n            flags = self.single_level_valid_flags((feat_h, feat_w),\n                                                  (valid_feat_h, valid_feat_w),\n                                                  self.num_base_anchors[i],\n                                                  device=device)\n            multi_level_flags.append(flags)\n        return multi_level_flags\n\n    def single_level_valid_flags(self,\n                                 featmap_size: Tuple[int, int],\n                                 valid_size: Tuple[int, int],\n                                 num_base_anchors: int,\n                                 device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate the valid flags of anchor in a single feature map.\n\n        Args:\n            featmap_size (tuple[int]): The size of feature maps, arrange\n                as (h, w).\n            valid_size (tuple[int]): The valid size of the feature maps.\n            num_base_anchors (int): The number of base anchors.\n            device (str | torch.device): Device where the flags will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: The valid flags of each anchor in a single level \\\n                feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n  
      valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        valid = valid[:, None].expand(valid.size(0),\n                                      num_base_anchors).contiguous().view(-1)\n        return valid\n\n    def __repr__(self) -> str:\n        \"\"\"str: a string that describes the module\"\"\"\n        indent_str = '    '\n        repr_str = self.__class__.__name__ + '(\\n'\n        repr_str += f'{indent_str}strides={self.strides},\\n'\n        repr_str += f'{indent_str}ratios={self.ratios},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n        repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n        repr_str += f'{indent_str}octave_base_scale='\n        repr_str += f'{self.octave_base_scale},\\n'\n        repr_str += f'{indent_str}scales_per_octave='\n        repr_str += f'{self.scales_per_octave},\\n'\n        repr_str += f'{indent_str}num_levels={self.num_levels}\\n'\n        repr_str += f'{indent_str}centers={self.centers},\\n'\n        repr_str += f'{indent_str}center_offset={self.center_offset})'\n        return repr_str\n\n\n@TASK_UTILS.register_module()\nclass SSDAnchorGenerator(AnchorGenerator):\n    \"\"\"Anchor generator for SSD.\n\n    Args:\n        strides (list[int]  | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels.\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        min_sizes (list[float]): The list of minimum anchor sizes on each\n            level.\n        max_sizes (list[float]): The list of maximum anchor sizes on each\n            level.\n        basesize_ratio_range (tuple(float)): Ratio range of anchors. Being\n            used when not setting min_sizes and max_sizes.\n        input_size (int): Size of feature map, 300 for SSD300, 512 for\n            SSD512. Being used when not setting min_sizes and max_sizes.\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. It is always set to be False in SSD.\n        use_box_type (bool): Whether to warp anchors with the box type data\n            structure. 
Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 strides: Union[List[int], List[Tuple[int, int]]],\n                 ratios: List[float],\n                 min_sizes: Optional[List[float]] = None,\n                 max_sizes: Optional[List[float]] = None,\n                 basesize_ratio_range: Tuple[float] = (0.15, 0.9),\n                 input_size: int = 300,\n                 scale_major: bool = True,\n                 use_box_type: bool = False) -> None:\n        assert len(strides) == len(ratios)\n        assert not (min_sizes is None) ^ (max_sizes is None)\n        self.strides = [_pair(stride) for stride in strides]\n        self.centers = [(stride[0] / 2., stride[1] / 2.)\n                        for stride in self.strides]\n\n        if min_sizes is None and max_sizes is None:\n            # use hard code to generate SSD anchors\n            self.input_size = input_size\n            assert is_tuple_of(basesize_ratio_range, float)\n            self.basesize_ratio_range = basesize_ratio_range\n            # calculate anchor ratios and sizes\n            min_ratio, max_ratio = basesize_ratio_range\n            min_ratio = int(min_ratio * 100)\n            max_ratio = int(max_ratio * 100)\n            step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))\n            min_sizes = []\n            max_sizes = []\n            for ratio in range(int(min_ratio), int(max_ratio) + 1, step):\n                min_sizes.append(int(self.input_size * ratio / 100))\n                max_sizes.append(int(self.input_size * (ratio + step) / 100))\n            if self.input_size == 300:\n                if basesize_ratio_range[0] == 0.15:  # SSD300 COCO\n                    min_sizes.insert(0, int(self.input_size * 7 / 100))\n                    max_sizes.insert(0, int(self.input_size * 15 / 100))\n                elif basesize_ratio_range[0] == 0.2:  # SSD300 VOC\n                    min_sizes.insert(0, int(self.input_size * 10 / 100))\n                    max_sizes.insert(0, int(self.input_size * 20 / 100))\n                else:\n                    raise ValueError(\n                        'basesize_ratio_range[0] should be either 0.15'\n                        'or 0.2 when input_size is 300, got '\n                        f'{basesize_ratio_range[0]}.')\n            elif self.input_size == 512:\n                if basesize_ratio_range[0] == 0.1:  # SSD512 COCO\n                    min_sizes.insert(0, int(self.input_size * 4 / 100))\n                    max_sizes.insert(0, int(self.input_size * 10 / 100))\n                elif basesize_ratio_range[0] == 0.15:  # SSD512 VOC\n                    min_sizes.insert(0, int(self.input_size * 7 / 100))\n                    max_sizes.insert(0, int(self.input_size * 15 / 100))\n                else:\n                    raise ValueError(\n                        'When not setting min_sizes and max_sizes,'\n                        'basesize_ratio_range[0] should be either 0.1'\n                        'or 0.15 when input_size is 512, got'\n                        f' {basesize_ratio_range[0]}.')\n            else:\n                raise ValueError(\n                    'Only support 300 or 512 in SSDAnchorGenerator when '\n                    'not setting min_sizes and max_sizes, '\n                    f'got {self.input_size}.')\n\n        assert len(min_sizes) == len(max_sizes) == len(strides)\n\n        anchor_ratios = []\n        anchor_scales = []\n        for k in range(len(self.strides)):\n            scales = [1., 
np.sqrt(max_sizes[k] / min_sizes[k])]\n            anchor_ratio = [1.]\n            for r in ratios[k]:\n                anchor_ratio += [1 / r, r]  # 4 or 6 ratio\n            anchor_ratios.append(torch.Tensor(anchor_ratio))\n            anchor_scales.append(torch.Tensor(scales))\n\n        self.base_sizes = min_sizes\n        self.scales = anchor_scales\n        self.ratios = anchor_ratios\n        self.scale_major = scale_major\n        self.center_offset = 0\n        self.base_anchors = self.gen_base_anchors()\n        self.use_box_type = use_box_type\n\n    def gen_base_anchors(self) -> List[Tensor]:\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_size in enumerate(self.base_sizes):\n            base_anchors = self.gen_single_level_base_anchors(\n                base_size,\n                scales=self.scales[i],\n                ratios=self.ratios[i],\n                center=self.centers[i])\n            indices = list(range(len(self.ratios[i])))\n            indices.insert(1, len(indices))\n            base_anchors = torch.index_select(base_anchors, 0,\n                                              torch.LongTensor(indices))\n            multi_level_base_anchors.append(base_anchors)\n        return multi_level_base_anchors\n\n    def __repr__(self) -> str:\n        \"\"\"str: a string that describes the module\"\"\"\n        indent_str = '    '\n        repr_str = self.__class__.__name__ + '(\\n'\n        repr_str += f'{indent_str}strides={self.strides},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n        repr_str += f'{indent_str}input_size={self.input_size},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}ratios={self.ratios},\\n'\n        repr_str += f'{indent_str}num_levels={self.num_levels},\\n'\n        repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n        repr_str += f'{indent_str}basesize_ratio_range='\n        repr_str += f'{self.basesize_ratio_range})'\n        return repr_str\n\n\n@TASK_UTILS.register_module()\nclass LegacyAnchorGenerator(AnchorGenerator):\n    \"\"\"Legacy anchor generator used in MMDetection V1.x.\n\n    Note:\n        Difference to the V2.0 anchor generator:\n\n        1. The center offset of V1.x anchors are set to be 0.5 rather than 0.\n        2. The width/height are minused by 1 when calculating the anchors' \\\n            centers and corners to meet the V1.x coordinate system.\n        3. The anchors' corners are quantized.\n\n    Args:\n        strides (list[int] | list[tuple[int]]): Strides of anchors\n            in multiple feature levels.\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        scales (list[int] | None): Anchor scales for anchors in a single level.\n            It cannot be set at the same time if `octave_base_scale` and\n            `scales_per_octave` are set.\n        base_sizes (list[int]): The basic sizes of anchors in multiple levels.\n            If None is given, strides will be used to generate base_sizes.\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. 
By default it is True in V2.0.\n        octave_base_scale (int): The base scale of octave.\n        scales_per_octave (int): Number of scales for each octave.\n            `octave_base_scale` and `scales_per_octave` are usually used in\n            retinanet and the `scales` should be None when they are set.\n        centers (list[tuple[float, float]] | None): The centers of the anchor\n            relative to the feature grid center in multiple feature levels.\n            By default it is set to be None and not used. If a list of float\n            is given, this list will be used to shift the centers of anchors.\n        center_offset (float): The offset of center in proportion to anchors'\n            width and height. By default it is 0 in V2.0 but it should be 0.5\n            in v1.x models.\n        use_box_type (bool): Whether to wrap anchors with the box type data\n            structure. Defaults to False.\n\n    Examples:\n        >>> from mmdet.models.task_modules.\n        ... prior_generators import LegacyAnchorGenerator\n        >>> self = LegacyAnchorGenerator(\n        >>>     [16], [1.], [1.], [9], center_offset=0.5)\n        >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')\n        >>> print(all_anchors)\n        [tensor([[ 0.,  0.,  8.,  8.],\n                [16.,  0., 24.,  8.],\n                [ 0., 16.,  8., 24.],\n                [16., 16., 24., 24.]])]\n    \"\"\"\n\n    def gen_single_level_base_anchors(self,\n                                      base_size: Union[int, float],\n                                      scales: Tensor,\n                                      ratios: Tensor,\n                                      center: Optional[Tuple[float]] = None) \\\n            -> Tensor:\n        \"\"\"Generate base anchors of a single level.\n\n        Note:\n            The width/height of anchors are reduced by 1 when calculating \\\n                the centers and corners to meet the V1.x coordinate system.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. 
Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature map.\n        \"\"\"\n        w = base_size\n        h = base_size\n        if center is None:\n            x_center = self.center_offset * (w - 1)\n            y_center = self.center_offset * (h - 1)\n        else:\n            x_center, y_center = center\n\n        h_ratios = torch.sqrt(ratios)\n        w_ratios = 1 / h_ratios\n        if self.scale_major:\n            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)\n            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)\n        else:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n\n        # use float anchor and the anchor's center is aligned with the\n        # pixel center\n        base_anchors = [\n            x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),\n            x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)\n        ]\n        base_anchors = torch.stack(base_anchors, dim=-1).round()\n\n        return base_anchors\n\n\n@TASK_UTILS.register_module()\nclass LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):\n    \"\"\"Legacy anchor generator used in MMDetection V1.x.\n\n    The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`\n    can be found in `LegacyAnchorGenerator`.\n    \"\"\"\n\n    def __init__(self,\n                 strides: Union[List[int], List[Tuple[int, int]]],\n                 ratios: List[float],\n                 basesize_ratio_range: Tuple[float],\n                 input_size: int = 300,\n                 scale_major: bool = True,\n                 use_box_type: bool = False) -> None:\n        super(LegacySSDAnchorGenerator, self).__init__(\n            strides=strides,\n            ratios=ratios,\n            basesize_ratio_range=basesize_ratio_range,\n            input_size=input_size,\n            scale_major=scale_major,\n            use_box_type=use_box_type)\n        self.centers = [((stride - 1) / 2., (stride - 1) / 2.)\n                        for stride in strides]\n        self.base_anchors = self.gen_base_anchors()\n\n\n@TASK_UTILS.register_module()\nclass YOLOAnchorGenerator(AnchorGenerator):\n    \"\"\"Anchor generator for YOLO.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels.\n        base_sizes (list[list[tuple[int, int]]]): The basic sizes\n            of anchors in multiple levels.\n    \"\"\"\n\n    def __init__(self,\n                 strides: Union[List[int], List[Tuple[int, int]]],\n                 base_sizes: List[List[Tuple[int, int]]],\n                 use_box_type: bool = False) -> None:\n        self.strides = [_pair(stride) for stride in strides]\n        self.centers = [(stride[0] / 2., stride[1] / 2.)\n                        for stride in self.strides]\n        self.base_sizes = []\n        num_anchor_per_level = len(base_sizes[0])\n        for base_sizes_per_level in base_sizes:\n            assert num_anchor_per_level == len(base_sizes_per_level)\n            self.base_sizes.append(\n                [_pair(base_size) for base_size in base_sizes_per_level])\n        self.base_anchors = self.gen_base_anchors()\n        self.use_box_type = use_box_type\n\n    @property\n    def num_levels(self) -> int:\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.base_sizes)\n\n    def 
gen_base_anchors(self) -> List[Tensor]:\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_sizes_per_level in enumerate(self.base_sizes):\n            center = None\n            if self.centers is not None:\n                center = self.centers[i]\n            multi_level_base_anchors.append(\n                self.gen_single_level_base_anchors(base_sizes_per_level,\n                                                   center))\n        return multi_level_base_anchors\n\n    def gen_single_level_base_anchors(self,\n                                      base_sizes_per_level: List[Tuple[int]],\n                                      center: Optional[Tuple[float]] = None) \\\n            -> Tensor:\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_sizes_per_level (list[tuple[int]]): Basic sizes of\n                anchors.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature maps.\n        \"\"\"\n        x_center, y_center = center\n        base_anchors = []\n        for base_size in base_sizes_per_level:\n            w, h = base_size\n\n            # use float anchor and the anchor's center is aligned with the\n            # pixel center\n            base_anchor = torch.Tensor([\n                x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,\n                y_center + 0.5 * h\n            ])\n            base_anchors.append(base_anchor)\n        base_anchors = torch.stack(base_anchors, dim=0)\n\n        return base_anchors\n"
  },
  {
    "path": "mmdet/models/task_modules/prior_generators/point_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.registry import TASK_UTILS\n\nDeviceType = Union[str, torch.device]\n\n\n@TASK_UTILS.register_module()\nclass PointGenerator:\n\n    def _meshgrid(self,\n                  x: Tensor,\n                  y: Tensor,\n                  row_major: bool = True) -> Tuple[Tensor, Tensor]:\n        \"\"\"Generate mesh grid of x and y.\n\n        Args:\n            x (torch.Tensor): Grids of x dimension.\n            y (torch.Tensor): Grids of y dimension.\n            row_major (bool): Whether to return y grids first.\n                Defaults to True.\n\n        Returns:\n            tuple[torch.Tensor]: The mesh grids of x and y.\n        \"\"\"\n        xx = x.repeat(len(y))\n        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_points(self,\n                    featmap_size: Tuple[int, int],\n                    stride=16,\n                    device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate grid points of a single level.\n\n        Args:\n            featmap_size (tuple[int, int]): Size of the feature maps.\n            stride (int): The stride of corresponding feature map.\n            device (str | torch.device): The device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: grid point in a feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        shift_x = torch.arange(0., feat_w, device=device) * stride\n        shift_y = torch.arange(0., feat_h, device=device) * stride\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        stride = shift_x.new_full((shift_xx.shape[0], ), stride)\n        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)\n        all_points = shifts.to(device)\n        return all_points\n\n    def valid_flags(self,\n                    featmap_size: Tuple[int, int],\n                    valid_size: Tuple[int, int],\n                    device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate valid flags of anchors in a feature map.\n\n        Args:\n            featmap_sizes (list(tuple[int, int])): List of feature map sizes in\n                multiple feature levels.\n            valid_shape (tuple[int, int]): The valid shape of the image.\n            device (str | torch.device): Device where the anchors will be\n                put on.\n\n        Return:\n            torch.Tensor: Valid flags of anchors in a level.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        return valid\n\n\n@TASK_UTILS.register_module()\nclass MlvlPointGenerator:\n    \"\"\"Standard points generator for multi-level (Mlvl) feature maps in 2D\n    points-based detectors.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels in order (w, h).\n        offset (float): The 
offset of points, the value is normalized with\n            corresponding stride. Defaults to 0.5.\n    \"\"\"\n\n    def __init__(self,\n                 strides: Union[List[int], List[Tuple[int, int]]],\n                 offset: float = 0.5) -> None:\n        self.strides = [_pair(stride) for stride in strides]\n        self.offset = offset\n\n    @property\n    def num_levels(self) -> int:\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.strides)\n\n    @property\n    def num_base_priors(self) -> List[int]:\n        \"\"\"list[int]: The number of priors (points) at a point\n        on the feature grid\"\"\"\n        return [1 for _ in range(len(self.strides))]\n\n    def _meshgrid(self,\n                  x: Tensor,\n                  y: Tensor,\n                  row_major: bool = True) -> Tuple[Tensor, Tensor]:\n        yy, xx = torch.meshgrid(y, x)\n        if row_major:\n            # warning .flatten() would cause error in ONNX exporting\n            # have to use reshape here\n            return xx.reshape(-1), yy.reshape(-1)\n\n        else:\n            return yy.reshape(-1), xx.reshape(-1)\n\n    def grid_priors(self,\n                    featmap_sizes: List[Tuple],\n                    dtype: torch.dtype = torch.float32,\n                    device: DeviceType = 'cuda',\n                    with_stride: bool = False) -> List[Tensor]:\n        \"\"\"Generate grid points of multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels, each size arrange as\n                as (h, w).\n            dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32.\n            device (str | torch.device): The device where the anchors will be\n                put on.\n            with_stride (bool): Whether to concatenate the stride to\n                the last dimension of points.\n\n        Return:\n            list[torch.Tensor]: Points of  multiple feature levels.\n            The sizes of each tensor should be (N, 2) when with stride is\n            ``False``, where N = width * height, width and height\n            are the sizes of the corresponding feature level,\n            and the last dimension 2 represent (coord_x, coord_y),\n            otherwise the shape should be (N, 4),\n            and the last dimension 4 represent\n            (coord_x, coord_y, stride_w, stride_h).\n        \"\"\"\n\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_priors = []\n        for i in range(self.num_levels):\n            priors = self.single_level_grid_priors(\n                featmap_sizes[i],\n                level_idx=i,\n                dtype=dtype,\n                device=device,\n                with_stride=with_stride)\n            multi_level_priors.append(priors)\n        return multi_level_priors\n\n    def single_level_grid_priors(self,\n                                 featmap_size: Tuple[int],\n                                 level_idx: int,\n                                 dtype: torch.dtype = torch.float32,\n                                 device: DeviceType = 'cuda',\n                                 with_stride: bool = False) -> Tensor:\n        \"\"\"Generate grid Points of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_priors``.\n\n        Args:\n            featmap_size (tuple[int]): Size of the feature maps, arrange as\n                
(h, w).\n            level_idx (int): The index of corresponding feature map level.\n            dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32.\n            device (str | torch.device): The device the tensor will be put on.\n                Defaults to 'cuda'.\n            with_stride (bool): Concatenate the stride to the last dimension\n                of points.\n\n        Return:\n            Tensor: Points of single feature levels.\n            The shape of tensor should be (N, 2) when with stride is\n            ``False``, where N = width * height, width and height\n            are the sizes of the corresponding feature level,\n            and the last dimension 2 represent (coord_x, coord_y),\n            otherwise the shape should be (N, 4),\n            and the last dimension 4 represent\n            (coord_x, coord_y, stride_w, stride_h).\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        stride_w, stride_h = self.strides[level_idx]\n        shift_x = (torch.arange(0, feat_w, device=device) +\n                   self.offset) * stride_w\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        shift_x = shift_x.to(dtype)\n\n        shift_y = (torch.arange(0, feat_h, device=device) +\n                   self.offset) * stride_h\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        shift_y = shift_y.to(dtype)\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        if not with_stride:\n            shifts = torch.stack([shift_xx, shift_yy], dim=-1)\n        else:\n            # use `shape[0]` instead of `len(shift_xx)` for ONNX export\n            stride_w = shift_xx.new_full((shift_xx.shape[0], ),\n                                         stride_w).to(dtype)\n            stride_h = shift_xx.new_full((shift_yy.shape[0], ),\n                                         stride_h).to(dtype)\n            shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h],\n                                 dim=-1)\n        all_points = shifts.to(device)\n        return all_points\n\n    def valid_flags(self,\n                    featmap_sizes: List[Tuple[int, int]],\n                    pad_shape: Tuple[int],\n                    device: DeviceType = 'cuda') -> List[Tensor]:\n        \"\"\"Generate valid flags of points of multiple feature levels.\n\n        Args:\n            featmap_sizes (list(tuple)): List of feature map sizes in\n                multiple feature levels, each size arrange as\n                as (h, w).\n            pad_shape (tuple(int)): The padded shape of the image,\n                arrange as (h, w).\n            device (str | torch.device): The device where the anchors will be\n                put on.\n\n        Return:\n            list(torch.Tensor): Valid flags of points of multiple levels.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_flags = []\n        for i in range(self.num_levels):\n            point_stride = self.strides[i]\n            feat_h, feat_w = featmap_sizes[i]\n            h, w = pad_shape[:2]\n            valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)\n            valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)\n            flags = self.single_level_valid_flags((feat_h, feat_w),\n                                                  (valid_feat_h, valid_feat_w),\n                                                  
device=device)\n            multi_level_flags.append(flags)\n        return multi_level_flags\n\n    def single_level_valid_flags(self,\n                                 featmap_size: Tuple[int, int],\n                                 valid_size: Tuple[int, int],\n                                 device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate the valid flags of points of a single feature map.\n\n        Args:\n            featmap_size (tuple[int]): The size of feature maps, arranged\n                as (h, w).\n            valid_size (tuple[int]): The valid size of the feature maps,\n                arranged as (h, w).\n            device (str | torch.device): The device where the flags will be\n            put on. Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: The valid flags of each point in a single level \\\n                feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        return valid\n\n    def sparse_priors(self,\n                      prior_idxs: Tensor,\n                      featmap_size: Tuple[int],\n                      level_idx: int,\n                      dtype: torch.dtype = torch.float32,\n                      device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate sparse points according to the ``prior_idxs``.\n\n        Args:\n            prior_idxs (Tensor): The index of corresponding anchors\n                in the feature map.\n            featmap_size (tuple[int]): feature map size arranged as (h, w).\n            level_idx (int): The level index of corresponding feature\n                map.\n            dtype (obj:`torch.dtype`): Data type of points. Defaults to\n                ``torch.float32``.\n            device (str | torch.device): The device where the points are\n                located.\n        Returns:\n            Tensor: Anchor with shape (N, 2), where N should be equal to\n            the length of ``prior_idxs``, and the last dimension\n            2 represents (coord_x, coord_y).\n        \"\"\"\n        height, width = featmap_size\n        x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]\n        y = ((prior_idxs // width) % height +\n             self.offset) * self.strides[level_idx][1]\n        priors = torch.stack([x, y], 1).to(dtype)\n        priors = priors.to(device)\n        return priors\n"
  },
  {
    "path": "mmdet/models/task_modules/prior_generators/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import BaseBoxes\n\n\ndef anchor_inside_flags(flat_anchors: Tensor,\n                        valid_flags: Tensor,\n                        img_shape: Tuple[int],\n                        allowed_border: int = 0) -> Tensor:\n    \"\"\"Check whether the anchors are inside the border.\n\n    Args:\n        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).\n        valid_flags (torch.Tensor): An existing valid flags of anchors.\n        img_shape (tuple(int)): Shape of current image.\n        allowed_border (int): The border to allow the valid anchor.\n            Defaults to 0.\n\n    Returns:\n        torch.Tensor: Flags indicating whether the anchors are inside a \\\n            valid range.\n    \"\"\"\n    img_h, img_w = img_shape[:2]\n    if allowed_border >= 0:\n        if isinstance(flat_anchors, BaseBoxes):\n            inside_flags = valid_flags & \\\n                flat_anchors.is_inside([img_h, img_w],\n                                       all_inside=True,\n                                       allowed_border=allowed_border)\n        else:\n            inside_flags = valid_flags & \\\n                (flat_anchors[:, 0] >= -allowed_border) & \\\n                (flat_anchors[:, 1] >= -allowed_border) & \\\n                (flat_anchors[:, 2] < img_w + allowed_border) & \\\n                (flat_anchors[:, 3] < img_h + allowed_border)\n    else:\n        inside_flags = valid_flags\n    return inside_flags\n\n\ndef calc_region(bbox: Tensor,\n                ratio: float,\n                featmap_size: Optional[Tuple] = None) -> Tuple[int]:\n    \"\"\"Calculate a proportional bbox region.\n\n    The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.\n\n    Args:\n        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).\n        ratio (float): Ratio of the output region.\n        featmap_size (tuple, Optional): Feature map size in (height, width)\n            order used for clipping the boundary. Defaults to None.\n\n    Returns:\n        tuple: x1, y1, x2, y2\n    \"\"\"\n    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()\n    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()\n    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()\n    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()\n    if featmap_size is not None:\n        x1 = x1.clamp(min=0, max=featmap_size[1])\n        y1 = y1.clamp(min=0, max=featmap_size[0])\n        x2 = x2.clamp(min=0, max=featmap_size[1])\n        y2 = y2.clamp(min=0, max=featmap_size[0])\n    return (x1, y1, x2, y2)\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_sampler import BaseSampler\nfrom .combined_sampler import CombinedSampler\nfrom .instance_balanced_pos_sampler import InstanceBalancedPosSampler\nfrom .iou_balanced_neg_sampler import IoUBalancedNegSampler\nfrom .mask_pseudo_sampler import MaskPseudoSampler\nfrom .mask_sampling_result import MaskSamplingResult\nfrom .multi_instance_random_sampler import MultiInsRandomSampler\nfrom .multi_instance_sampling_result import MultiInstanceSamplingResult\nfrom .ohem_sampler import OHEMSampler\nfrom .pseudo_sampler import PseudoSampler\nfrom .random_sampler import RandomSampler\nfrom .sampling_result import SamplingResult\nfrom .score_hlr_sampler import ScoreHLRSampler\n\n__all__ = [\n    'BaseSampler', 'PseudoSampler', 'RandomSampler',\n    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',\n    'MaskSamplingResult', 'MultiInstanceSamplingResult',\n    'MultiInsRandomSampler'\n]\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/base_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.structures.bbox import BaseBoxes, cat_boxes\nfrom ..assigners import AssignResult\nfrom .sampling_result import SamplingResult\n\n\nclass BaseSampler(metaclass=ABCMeta):\n    \"\"\"Base class of samplers.\n\n    Args:\n        num (int): Number of samples\n        pos_fraction (float): Fraction of positive samples\n        neg_pos_up (int): Upper bound number of negative and\n            positive samples. Defaults to -1.\n        add_gt_as_proposals (bool): Whether to add ground truth\n            boxes as proposals. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num: int,\n                 pos_fraction: float,\n                 neg_pos_ub: int = -1,\n                 add_gt_as_proposals: bool = True,\n                 **kwargs) -> None:\n        self.num = num\n        self.pos_fraction = pos_fraction\n        self.neg_pos_ub = neg_pos_ub\n        self.add_gt_as_proposals = add_gt_as_proposals\n        self.pos_sampler = self\n        self.neg_sampler = self\n\n    @abstractmethod\n    def _sample_pos(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        pass\n\n    @abstractmethod\n    def _sample_neg(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        pass\n\n    def sample(self, assign_result: AssignResult, pred_instances: InstanceData,\n               gt_instances: InstanceData, **kwargs) -> SamplingResult:\n        \"\"\"Sample positive and negative bboxes.\n\n        This is a simple implementation of bbox sampling given candidates,\n        assigning results and ground truth bboxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigning results.\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n\n        Returns:\n            :obj:`SamplingResult`: Sampling result.\n\n        Example:\n            >>> from mmengine.structures import InstanceData\n            >>> from mmdet.models.task_modules.samplers import RandomSampler,\n            >>> from mmdet.models.task_modules.assigners import AssignResult\n            >>> from mmdet.models.task_modules.samplers.\n            ... sampling_result import ensure_rng, random_boxes\n            >>> rng = ensure_rng(None)\n            >>> assign_result = AssignResult.random(rng=rng)\n            >>> pred_instances = InstanceData()\n            >>> pred_instances.priors = random_boxes(assign_result.num_preds,\n            ...                                      rng=rng)\n            >>> gt_instances = InstanceData()\n            >>> gt_instances.bboxes = random_boxes(assign_result.num_gts,\n            ...                           
         rng=rng)\n            >>> gt_instances.labels = torch.randint(\n            ...     0, 5, (assign_result.num_gts,), dtype=torch.long)\n            >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,\n            >>>                      add_gt_as_proposals=False)\n            >>> self = self.sample(assign_result, pred_instances, gt_instances)\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n        if len(priors.shape) < 2:\n            priors = priors[None, :]\n\n        gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)\n        if self.add_gt_as_proposals and len(gt_bboxes) > 0:\n            # When `gt_bboxes` and `priors` are all box type, convert\n            # `gt_bboxes` type to `priors` type.\n            if (isinstance(gt_bboxes, BaseBoxes)\n                    and isinstance(priors, BaseBoxes)):\n                gt_bboxes_ = gt_bboxes.convert_to(type(priors))\n            else:\n                gt_bboxes_ = gt_bboxes\n            priors = cat_boxes([gt_bboxes_, priors], dim=0)\n            assign_result.add_gt_(gt_labels)\n            gt_ones = priors.new_ones(gt_bboxes_.shape[0], dtype=torch.uint8)\n            gt_flags = torch.cat([gt_ones, gt_flags])\n\n        num_expected_pos = int(self.num * self.pos_fraction)\n        pos_inds = self.pos_sampler._sample_pos(\n            assign_result, num_expected_pos, bboxes=priors, **kwargs)\n        # We found that sampled indices have duplicated items occasionally.\n        # (may be a bug of PyTorch)\n        pos_inds = pos_inds.unique()\n        num_sampled_pos = pos_inds.numel()\n        num_expected_neg = self.num - num_sampled_pos\n        if self.neg_pos_ub >= 0:\n            _pos = max(1, num_sampled_pos)\n            neg_upper_bound = int(self.neg_pos_ub * _pos)\n            if num_expected_neg > neg_upper_bound:\n                num_expected_neg = neg_upper_bound\n        neg_inds = self.neg_sampler._sample_neg(\n            assign_result, num_expected_neg, bboxes=priors, **kwargs)\n        neg_inds = neg_inds.unique()\n\n        sampling_result = SamplingResult(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            priors=priors,\n            gt_bboxes=gt_bboxes,\n            assign_result=assign_result,\n            gt_flags=gt_flags)\n        return sampling_result\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/combined_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.registry import TASK_UTILS\nfrom .base_sampler import BaseSampler\n\n\n@TASK_UTILS.register_module()\nclass CombinedSampler(BaseSampler):\n    \"\"\"A sampler that combines positive sampler and negative sampler.\"\"\"\n\n    def __init__(self, pos_sampler, neg_sampler, **kwargs):\n        super(CombinedSampler, self).__init__(**kwargs)\n        self.pos_sampler = TASK_UTILS.build(pos_sampler, default_args=kwargs)\n        self.neg_sampler = TASK_UTILS.build(neg_sampler, default_args=kwargs)\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/instance_balanced_pos_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom .random_sampler import RandomSampler\n\n\n@TASK_UTILS.register_module()\nclass InstanceBalancedPosSampler(RandomSampler):\n    \"\"\"Instance balanced sampler that samples equal number of positive samples\n    for each instance.\"\"\"\n\n    def _sample_pos(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample positive boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): The assigned results of boxes.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()\n            num_gts = len(unique_gt_inds)\n            num_per_gt = int(round(num_expected / float(num_gts)) + 1)\n            sampled_inds = []\n            for i in unique_gt_inds:\n                inds = torch.nonzero(\n                    assign_result.gt_inds == i.item(), as_tuple=False)\n                if inds.numel() != 0:\n                    inds = inds.squeeze(1)\n                else:\n                    continue\n                if len(inds) > num_per_gt:\n                    inds = self.random_choice(inds, num_per_gt)\n                sampled_inds.append(inds)\n            sampled_inds = torch.cat(sampled_inds)\n            if len(sampled_inds) < num_expected:\n                num_extra = num_expected - len(sampled_inds)\n                extra_inds = np.array(\n                    list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))\n                if len(extra_inds) > num_extra:\n                    extra_inds = self.random_choice(extra_inds, num_extra)\n                extra_inds = torch.from_numpy(extra_inds).to(\n                    assign_result.gt_inds.device).long()\n                sampled_inds = torch.cat([sampled_inds, extra_inds])\n            elif len(sampled_inds) > num_expected:\n                sampled_inds = self.random_choice(sampled_inds, num_expected)\n            return sampled_inds\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/iou_balanced_neg_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom .random_sampler import RandomSampler\n\n\n@TASK_UTILS.register_module()\nclass IoUBalancedNegSampler(RandomSampler):\n    \"\"\"IoU Balanced Sampling.\n\n    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n    Sampling proposals according to their IoU. `floor_fraction` of needed RoIs\n    are sampled from proposals whose IoU are lower than `floor_thr` randomly.\n    The others are sampled from proposals whose IoU are higher than\n    `floor_thr`. These proposals are sampled from some bins evenly, which are\n    split by `num_bins` via IoU evenly.\n\n    Args:\n        num (int): number of proposals.\n        pos_fraction (float): fraction of positive proposals.\n        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,\n            set to -1 if all using IoU balanced sampling.\n        floor_fraction (float): sampling fraction of proposals under floor_thr.\n        num_bins (int): number of bins in IoU balanced sampling.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 floor_thr=-1,\n                 floor_fraction=0,\n                 num_bins=3,\n                 **kwargs):\n        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,\n                                                    **kwargs)\n        assert floor_thr >= 0 or floor_thr == -1\n        assert 0 <= floor_fraction <= 1\n        assert num_bins >= 1\n\n        self.floor_thr = floor_thr\n        self.floor_fraction = floor_fraction\n        self.num_bins = num_bins\n\n    def sample_via_interval(self, max_overlaps, full_set, num_expected):\n        \"\"\"Sample according to the iou interval.\n\n        Args:\n            max_overlaps (torch.Tensor): IoU between bounding boxes and ground\n                truth boxes.\n            full_set (set(int)): A full set of indices of boxes。\n            num_expected (int): Number of expected samples。\n\n        Returns:\n            np.ndarray: Indices  of samples\n        \"\"\"\n        max_iou = max_overlaps.max()\n        iou_interval = (max_iou - self.floor_thr) / self.num_bins\n        per_num_expected = int(num_expected / self.num_bins)\n\n        sampled_inds = []\n        for i in range(self.num_bins):\n            start_iou = self.floor_thr + i * iou_interval\n            end_iou = self.floor_thr + (i + 1) * iou_interval\n            tmp_set = set(\n                np.where(\n                    np.logical_and(max_overlaps >= start_iou,\n                                   max_overlaps < end_iou))[0])\n            tmp_inds = list(tmp_set & full_set)\n            if len(tmp_inds) > per_num_expected:\n                tmp_sampled_set = self.random_choice(tmp_inds,\n                                                     per_num_expected)\n            else:\n                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)\n            sampled_inds.append(tmp_sampled_set)\n\n        sampled_inds = np.concatenate(sampled_inds)\n        if len(sampled_inds) < num_expected:\n            num_extra = num_expected - len(sampled_inds)\n            extra_inds = np.array(list(full_set - set(sampled_inds)))\n            if len(extra_inds) > num_extra:\n                extra_inds = self.random_choice(extra_inds, num_extra)\n            sampled_inds = np.concatenate([sampled_inds, extra_inds])\n\n        return sampled_inds\n\n    def 
_sample_neg(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample negative boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): The assigned results of boxes.\n            num_expected (int): The number of expected negative samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            max_overlaps = assign_result.max_overlaps.cpu().numpy()\n            # balance sampling for negative samples\n            neg_set = set(neg_inds.cpu().numpy())\n\n            if self.floor_thr > 0:\n                floor_set = set(\n                    np.where(\n                        np.logical_and(max_overlaps >= 0,\n                                       max_overlaps < self.floor_thr))[0])\n                iou_sampling_set = set(\n                    np.where(max_overlaps >= self.floor_thr)[0])\n            elif self.floor_thr == 0:\n                floor_set = set(np.where(max_overlaps == 0)[0])\n                iou_sampling_set = set(\n                    np.where(max_overlaps > self.floor_thr)[0])\n            else:\n                floor_set = set()\n                iou_sampling_set = set(\n                    np.where(max_overlaps > self.floor_thr)[0])\n                # for sampling interval calculation\n                self.floor_thr = 0\n\n            floor_neg_inds = list(floor_set & neg_set)\n            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)\n            num_expected_iou_sampling = int(num_expected *\n                                            (1 - self.floor_fraction))\n            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:\n                if self.num_bins >= 2:\n                    iou_sampled_inds = self.sample_via_interval(\n                        max_overlaps, set(iou_sampling_neg_inds),\n                        num_expected_iou_sampling)\n                else:\n                    iou_sampled_inds = self.random_choice(\n                        iou_sampling_neg_inds, num_expected_iou_sampling)\n            else:\n                iou_sampled_inds = np.array(\n                    iou_sampling_neg_inds, dtype=np.int64)\n            num_expected_floor = num_expected - len(iou_sampled_inds)\n            if len(floor_neg_inds) > num_expected_floor:\n                sampled_floor_inds = self.random_choice(\n                    floor_neg_inds, num_expected_floor)\n            else:\n                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)\n            sampled_inds = np.concatenate(\n                (sampled_floor_inds, iou_sampled_inds))\n            if len(sampled_inds) < num_expected:\n                num_extra = num_expected - len(sampled_inds)\n                extra_inds = np.array(list(neg_set - set(sampled_inds)))\n                if len(extra_inds) > num_extra:\n                    extra_inds = self.random_choice(extra_inds, num_extra)\n                sampled_inds = np.concatenate((sampled_inds, extra_inds))\n            sampled_inds = torch.from_numpy(sampled_inds).long().to(\n                assign_result.gt_inds.device)\n            return sampled_inds\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/mask_pseudo_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.\"\"\"\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom ..assigners import AssignResult\nfrom .base_sampler import BaseSampler\nfrom .mask_sampling_result import MaskSamplingResult\n\n\n@TASK_UTILS.register_module()\nclass MaskPseudoSampler(BaseSampler):\n    \"\"\"A pseudo sampler that does not do sampling actually.\"\"\"\n\n    def __init__(self, **kwargs):\n        pass\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n\n    def sample(self, assign_result: AssignResult, pred_instances: InstanceData,\n               gt_instances: InstanceData, *args, **kwargs):\n        \"\"\"Directly returns the positive and negative indices  of samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Mask assigning results.\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``scores`` and ``masks`` predicted\n                by the model.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``labels`` and ``masks``\n                attributes.\n\n        Returns:\n            :obj:`SamplingResult`: sampler results\n        \"\"\"\n        pred_masks = pred_instances.masks\n        gt_masks = gt_instances.masks\n        pos_inds = torch.nonzero(\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\n        neg_inds = torch.nonzero(\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\n        gt_flags = pred_masks.new_zeros(pred_masks.shape[0], dtype=torch.uint8)\n        sampling_result = MaskSamplingResult(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            masks=pred_masks,\n            gt_masks=gt_masks,\n            assign_result=assign_result,\n            gt_flags=gt_flags,\n            avg_factor_with_neg=False)\n        return sampling_result\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/mask_sampling_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.\"\"\"\n\nimport torch\nfrom torch import Tensor\n\nfrom ..assigners import AssignResult\nfrom .sampling_result import SamplingResult\n\n\nclass MaskSamplingResult(SamplingResult):\n    \"\"\"Mask sampling result.\"\"\"\n\n    def __init__(self,\n                 pos_inds: Tensor,\n                 neg_inds: Tensor,\n                 masks: Tensor,\n                 gt_masks: Tensor,\n                 assign_result: AssignResult,\n                 gt_flags: Tensor,\n                 avg_factor_with_neg: bool = True) -> None:\n        self.pos_inds = pos_inds\n        self.neg_inds = neg_inds\n        self.num_pos = max(pos_inds.numel(), 1)\n        self.num_neg = max(neg_inds.numel(), 1)\n        self.avg_factor = self.num_pos + self.num_neg \\\n            if avg_factor_with_neg else self.num_pos\n\n        self.pos_masks = masks[pos_inds]\n        self.neg_masks = masks[neg_inds]\n        self.pos_is_gt = gt_flags[pos_inds]\n\n        self.num_gts = gt_masks.shape[0]\n        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n\n        if gt_masks.numel() == 0:\n            # hack for index error case\n            assert self.pos_assigned_gt_inds.numel() == 0\n            self.pos_gt_masks = torch.empty_like(gt_masks)\n        else:\n            self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :]\n\n    @property\n    def masks(self) -> Tensor:\n        \"\"\"torch.Tensor: concatenated positive and negative masks.\"\"\"\n        return torch.cat([self.pos_masks, self.neg_masks])\n\n    def __nice__(self) -> str:\n        data = self.info.copy()\n        data['pos_masks'] = data.pop('pos_masks').shape\n        data['neg_masks'] = data.pop('neg_masks').shape\n        parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n        body = '    ' + ',\\n    '.join(parts)\n        return '{\\n' + body + '\\n}'\n\n    @property\n    def info(self) -> dict:\n        \"\"\"Returns a dictionary of info about the object.\"\"\"\n        return {\n            'pos_inds': self.pos_inds,\n            'neg_inds': self.neg_inds,\n            'pos_masks': self.pos_masks,\n            'neg_masks': self.neg_masks,\n            'pos_is_gt': self.pos_is_gt,\n            'num_gts': self.num_gts,\n            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n        }\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/multi_instance_random_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Union\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom numpy import ndarray\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom ..assigners import AssignResult\nfrom .multi_instance_sampling_result import MultiInstanceSamplingResult\nfrom .random_sampler import RandomSampler\n\n\n@TASK_UTILS.register_module()\nclass MultiInsRandomSampler(RandomSampler):\n    \"\"\"Random sampler for multi instance.\n\n    Note:\n        Multi-instance means to predict multiple detection boxes with\n        one proposal box. `AssignResult` may assign multiple gt boxes\n        to each proposal box, in this case `RandomSampler` should be\n        replaced by `MultiInsRandomSampler`\n    \"\"\"\n\n    def _sample_pos(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly sample some positive samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        pos_inds = torch.nonzero(\n            assign_result.labels[:, 0] > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.random_choice(pos_inds, num_expected)\n\n    def _sample_neg(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly sample some negative samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        neg_inds = torch.nonzero(\n            assign_result.labels[:, 0] == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            return self.random_choice(neg_inds, num_expected)\n\n    def sample(self, assign_result: AssignResult, pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               **kwargs) -> MultiInstanceSamplingResult:\n        \"\"\"Sample positive and negative bboxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigning results from\n                MultiInstanceAssigner.\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n\n        Returns:\n            :obj:`MultiInstanceSamplingResult`: Sampling result.\n        \"\"\"\n\n        assert 'batch_gt_instances_ignore' in kwargs, \\\n            'batch_gt_instances_ignore is necessary for MultiInsRandomSampler'\n\n        gt_bboxes = gt_instances.bboxes\n        ignore_bboxes = kwargs['batch_gt_instances_ignore'].bboxes\n        gt_and_ignore_bboxes = torch.cat([gt_bboxes, ignore_bboxes], dim=0)\n        priors = pred_instances.priors\n        if len(priors.shape) < 2:\n            priors = priors[None, :]\n        priors = priors[:, :4]\n\n        gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)\n        priors = torch.cat([priors, gt_and_ignore_bboxes], dim=0)\n        gt_ones = priors.new_ones(\n            gt_and_ignore_bboxes.shape[0], dtype=torch.uint8)\n        gt_flags = torch.cat([gt_flags, gt_ones])\n\n        num_expected_pos = int(self.num * self.pos_fraction)\n        pos_inds = self.pos_sampler._sample_pos(assign_result,\n                                                num_expected_pos)\n        # We found that sampled indices have duplicated items occasionally.\n        # (may be a bug of PyTorch)\n        pos_inds = pos_inds.unique()\n        num_sampled_pos = pos_inds.numel()\n        num_expected_neg = self.num - num_sampled_pos\n        if self.neg_pos_ub >= 0:\n            _pos = max(1, num_sampled_pos)\n            neg_upper_bound = int(self.neg_pos_ub * _pos)\n            if num_expected_neg > neg_upper_bound:\n                num_expected_neg = neg_upper_bound\n        neg_inds = self.neg_sampler._sample_neg(assign_result,\n                                                num_expected_neg)\n        neg_inds = neg_inds.unique()\n\n        sampling_result = MultiInstanceSamplingResult(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            priors=priors,\n            gt_and_ignore_bboxes=gt_and_ignore_bboxes,\n            assign_result=assign_result,\n            gt_flags=gt_flags)\n        return sampling_result\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/multi_instance_sampling_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom torch import Tensor\n\nfrom ..assigners import AssignResult\nfrom .sampling_result import SamplingResult\n\n\nclass MultiInstanceSamplingResult(SamplingResult):\n    \"\"\"Bbox sampling result. Further encapsulation of SamplingResult. Three\n    attributes neg_assigned_gt_inds, neg_gt_labels, and neg_gt_bboxes have been\n    added for SamplingResult.\n\n    Args:\n        pos_inds (Tensor): Indices of positive samples.\n        neg_inds (Tensor): Indices of negative samples.\n        priors (Tensor): The priors can be anchors or points,\n            or the bboxes predicted by the previous stage.\n        gt_and_ignore_bboxes (Tensor): Ground truth and ignore bboxes.\n        assign_result (:obj:`AssignResult`): Assigning results.\n        gt_flags (Tensor): The Ground truth flags.\n        avg_factor_with_neg (bool):  If True, ``avg_factor`` equal to\n            the number of total priors; Otherwise, it is the number of\n            positive priors. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 pos_inds: Tensor,\n                 neg_inds: Tensor,\n                 priors: Tensor,\n                 gt_and_ignore_bboxes: Tensor,\n                 assign_result: AssignResult,\n                 gt_flags: Tensor,\n                 avg_factor_with_neg: bool = True) -> None:\n        self.neg_assigned_gt_inds = assign_result.gt_inds[neg_inds]\n        self.neg_gt_labels = assign_result.labels[neg_inds]\n\n        if gt_and_ignore_bboxes.numel() == 0:\n            self.neg_gt_bboxes = torch.empty_like(gt_and_ignore_bboxes).view(\n                -1, 4)\n        else:\n            if len(gt_and_ignore_bboxes.shape) < 2:\n                gt_and_ignore_bboxes = gt_and_ignore_bboxes.view(-1, 4)\n            self.neg_gt_bboxes = gt_and_ignore_bboxes[\n                self.neg_assigned_gt_inds.long(), :]\n\n        # To resist the minus 1 operation in `SamplingResult.init()`.\n        assign_result.gt_inds += 1\n        super().__init__(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            priors=priors,\n            gt_bboxes=gt_and_ignore_bboxes,\n            assign_result=assign_result,\n            gt_flags=gt_flags,\n            avg_factor_with_neg=avg_factor_with_neg)\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/ohem_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import bbox2roi\nfrom .base_sampler import BaseSampler\n\n\n@TASK_UTILS.register_module()\nclass OHEMSampler(BaseSampler):\n    r\"\"\"Online Hard Example Mining Sampler described in `Training Region-based\n    Object Detectors with Online Hard Example Mining\n    <https://arxiv.org/abs/1604.03540>`_.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 context,\n                 neg_pos_ub=-1,\n                 add_gt_as_proposals=True,\n                 loss_key='loss_cls',\n                 **kwargs):\n        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,\n                                          add_gt_as_proposals)\n        self.context = context\n        if not hasattr(self.context, 'num_stages'):\n            self.bbox_head = self.context.bbox_head\n        else:\n            self.bbox_head = self.context.bbox_head[self.context.current_stage]\n\n        self.loss_key = loss_key\n\n    def hard_mining(self, inds, num_expected, bboxes, labels, feats):\n        with torch.no_grad():\n            rois = bbox2roi([bboxes])\n            if not hasattr(self.context, 'num_stages'):\n                bbox_results = self.context._bbox_forward(feats, rois)\n            else:\n                bbox_results = self.context._bbox_forward(\n                    self.context.current_stage, feats, rois)\n            cls_score = bbox_results['cls_score']\n            loss = self.bbox_head.loss(\n                cls_score=cls_score,\n                bbox_pred=None,\n                rois=rois,\n                labels=labels,\n                label_weights=cls_score.new_ones(cls_score.size(0)),\n                bbox_targets=None,\n                bbox_weights=None,\n                reduction_override='none')[self.loss_key]\n            _, topk_loss_inds = loss.topk(num_expected)\n        return inds[topk_loss_inds]\n\n    def _sample_pos(self,\n                    assign_result,\n                    num_expected,\n                    bboxes=None,\n                    feats=None,\n                    **kwargs):\n        \"\"\"Sample positive boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            num_expected (int): Number of expected positive samples\n            bboxes (torch.Tensor, optional): Boxes. 
Defaults to None.\n            feats (list[torch.Tensor], optional): Multi-level features.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: Indices  of positive samples\n        \"\"\"\n        # Sample some hard positive samples\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],\n                                    assign_result.labels[pos_inds], feats)\n\n    def _sample_neg(self,\n                    assign_result,\n                    num_expected,\n                    bboxes=None,\n                    feats=None,\n                    **kwargs):\n        \"\"\"Sample negative boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            num_expected (int): Number of expected negative samples\n            bboxes (torch.Tensor, optional): Boxes. Defaults to None.\n            feats (list[torch.Tensor], optional): Multi-level features.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: Indices  of negative samples\n        \"\"\"\n        # Sample some hard negative samples\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            neg_labels = assign_result.labels.new_empty(\n                neg_inds.size(0)).fill_(self.bbox_head.num_classes)\n            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],\n                                    neg_labels, feats)\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/pseudo_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import TASK_UTILS\nfrom ..assigners import AssignResult\nfrom .base_sampler import BaseSampler\nfrom .sampling_result import SamplingResult\n\n\n@TASK_UTILS.register_module()\nclass PseudoSampler(BaseSampler):\n    \"\"\"A pseudo sampler that does not do sampling actually.\"\"\"\n\n    def __init__(self, **kwargs):\n        pass\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n\n    def sample(self, assign_result: AssignResult, pred_instances: InstanceData,\n               gt_instances: InstanceData, *args, **kwargs):\n        \"\"\"Directly returns the positive and negative indices  of samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors, points, or bboxes predicted by the model,\n                shape(n, 4).\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes`` and ``labels``\n                attributes.\n\n        Returns:\n            :obj:`SamplingResult`: sampler results\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n\n        pos_inds = torch.nonzero(\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\n        neg_inds = torch.nonzero(\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\n\n        gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)\n        sampling_result = SamplingResult(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            priors=priors,\n            gt_bboxes=gt_bboxes,\n            assign_result=assign_result,\n            gt_flags=gt_flags,\n            avg_factor_with_neg=False)\n        return sampling_result\n"
  },
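``PseudoSampler.sample`` performs no subsampling: priors with ``gt_inds > 0`` all become positives, priors with ``gt_inds == 0`` all become negatives, and ``avg_factor`` counts only the positives. A minimal usage sketch, assuming ``AssignResult.random`` accepts the ``num_preds``/``num_gts`` keywords described under ``SamplingResult.random`` below and that the samplers are exported from the package ``__init__`` (all sizes are illustrative):

```python
# Hedged sketch, not part of the repo: exercise PseudoSampler on random data.
import torch
from mmengine.structures import InstanceData

from mmdet.models.task_modules.assigners import AssignResult
from mmdet.models.task_modules.samplers import PseudoSampler

assign_result = AssignResult.random(num_preds=8, num_gts=2)  # assumed kwargs
pred_instances = InstanceData(priors=torch.rand(8, 4))
gt_instances = InstanceData(
    bboxes=torch.rand(2, 4), labels=torch.randint(0, 5, (2, )))

result = PseudoSampler().sample(assign_result, pred_instances, gt_instances)
# No subsampling: positives and negatives together cover all assigned priors,
# and avg_factor equals the number of positives (avg_factor_with_neg=False).
print(result.pos_inds.numel(), result.neg_inds.numel(), result.avg_factor)
```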
  {
    "path": "mmdet/models/task_modules/samplers/random_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Union\n\nimport torch\nfrom numpy import ndarray\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom ..assigners import AssignResult\nfrom .base_sampler import BaseSampler\n\n\n@TASK_UTILS.register_module()\nclass RandomSampler(BaseSampler):\n    \"\"\"Random sampler.\n\n    Args:\n        num (int): Number of samples\n        pos_fraction (float): Fraction of positive samples\n        neg_pos_up (int): Upper bound number of negative and\n            positive samples. Defaults to -1.\n        add_gt_as_proposals (bool): Whether to add ground truth\n            boxes as proposals. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num: int,\n                 pos_fraction: float,\n                 neg_pos_ub: int = -1,\n                 add_gt_as_proposals: bool = True,\n                 **kwargs):\n        from .sampling_result import ensure_rng\n        super().__init__(\n            num=num,\n            pos_fraction=pos_fraction,\n            neg_pos_ub=neg_pos_ub,\n            add_gt_as_proposals=add_gt_as_proposals)\n        self.rng = ensure_rng(kwargs.get('rng', None))\n\n    def random_choice(self, gallery: Union[Tensor, ndarray, list],\n                      num: int) -> Union[Tensor, ndarray]:\n        \"\"\"Random select some elements from the gallery.\n\n        If `gallery` is a Tensor, the returned indices will be a Tensor;\n        If `gallery` is a ndarray or list, the returned indices will be a\n        ndarray.\n\n        Args:\n            gallery (Tensor | ndarray | list): indices pool.\n            num (int): expected sample num.\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        assert len(gallery) >= num\n\n        is_tensor = isinstance(gallery, torch.Tensor)\n        if not is_tensor:\n            if torch.cuda.is_available():\n                device = torch.cuda.current_device()\n            else:\n                device = 'cpu'\n            gallery = torch.tensor(gallery, dtype=torch.long, device=device)\n        # This is a temporary fix. 
We can revert the following code\n        # when PyTorch fixes the abnormal return of torch.randperm.\n        # See: https://github.com/open-mmlab/mmdetection/pull/5014\n        perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)\n        rand_inds = gallery[perm]\n        if not is_tensor:\n            rand_inds = rand_inds.cpu().numpy()\n        return rand_inds\n\n    def _sample_pos(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly sample some positive samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.random_choice(pos_inds, num_expected)\n\n    def _sample_neg(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly sample some negative samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            num_expected (int): The number of expected negative samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            return self.random_choice(neg_inds, num_expected)\n"
  },
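``RandomSampler.random_choice`` mirrors the container type of its input: a Tensor gallery yields Tensor indices, while a list or ndarray gallery yields ndarray indices (via the ``torch.randperm`` workaround noted above). A small sketch with illustrative sizes:

```python
# Hedged sketch, not part of the repo: random_choice keeps the input's type.
import numpy as np
import torch

from mmdet.models.task_modules.samplers import RandomSampler

sampler = RandomSampler(num=32, pos_fraction=0.5)

tensor_inds = sampler.random_choice(torch.arange(100), 10)  # Tensor in/out
array_inds = sampler.random_choice(list(range(100)), 10)    # list in, ndarray out
assert isinstance(tensor_inds, torch.Tensor)
assert isinstance(array_inds, np.ndarray)
```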
  {
    "path": "mmdet/models/task_modules/samplers/sampling_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import BaseBoxes, cat_boxes\nfrom mmdet.utils import util_mixins\nfrom mmdet.utils.util_random import ensure_rng\nfrom ..assigners import AssignResult\n\n\ndef random_boxes(num=1, scale=1, rng=None):\n    \"\"\"Simple version of ``kwimage.Boxes.random``\n\n    Returns:\n        Tensor: shape (n, 4) in x1, y1, x2, y2 format.\n\n    References:\n        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390\n\n    Example:\n        >>> num = 3\n        >>> scale = 512\n        >>> rng = 0\n        >>> boxes = random_boxes(num, scale, rng)\n        >>> print(boxes)\n        tensor([[280.9925, 278.9802, 308.6148, 366.1769],\n                [216.9113, 330.6978, 224.0446, 456.5878],\n                [405.3632, 196.3221, 493.3953, 270.7942]])\n    \"\"\"\n    rng = ensure_rng(rng)\n\n    tlbr = rng.rand(num, 4).astype(np.float32)\n\n    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])\n    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])\n    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])\n    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])\n\n    tlbr[:, 0] = tl_x * scale\n    tlbr[:, 1] = tl_y * scale\n    tlbr[:, 2] = br_x * scale\n    tlbr[:, 3] = br_y * scale\n\n    boxes = torch.from_numpy(tlbr)\n    return boxes\n\n\nclass SamplingResult(util_mixins.NiceRepr):\n    \"\"\"Bbox sampling result.\n\n    Args:\n        pos_inds (Tensor): Indices of positive samples.\n        neg_inds (Tensor): Indices of negative samples.\n        priors (Tensor): The priors can be anchors or points,\n            or the bboxes predicted by the previous stage.\n        gt_bboxes (Tensor): Ground truth of bboxes.\n        assign_result (:obj:`AssignResult`): Assigning results.\n        gt_flags (Tensor): The Ground truth flags.\n        avg_factor_with_neg (bool):  If True, ``avg_factor`` equal to\n            the number of total priors; Otherwise, it is the number of\n            positive priors. 
Defaults to True.\n\n    Example:\n        >>> # xdoctest: +IGNORE_WANT\n        >>> from mmdet.models.task_modules.samplers.sampling_result import *  # NOQA\n        >>> self = SamplingResult.random(rng=10)\n        >>> print(f'self = {self}')\n        self = <SamplingResult({\n            'neg_inds': tensor([1,  2,  3,  5,  6,  7,  8,\n                                9, 10, 11, 12, 13]),\n            'neg_priors': torch.Size([12, 4]),\n            'num_gts': 1,\n            'num_neg': 12,\n            'num_pos': 1,\n            'avg_factor': 13,\n            'pos_assigned_gt_inds': tensor([0]),\n            'pos_inds': tensor([0]),\n            'pos_is_gt': tensor([1], dtype=torch.uint8),\n            'pos_priors': torch.Size([1, 4])\n        })>\n    \"\"\"\n\n    def __init__(self,\n                 pos_inds: Tensor,\n                 neg_inds: Tensor,\n                 priors: Tensor,\n                 gt_bboxes: Tensor,\n                 assign_result: AssignResult,\n                 gt_flags: Tensor,\n                 avg_factor_with_neg: bool = True) -> None:\n        self.pos_inds = pos_inds\n        self.neg_inds = neg_inds\n        self.num_pos = max(pos_inds.numel(), 1)\n        self.num_neg = max(neg_inds.numel(), 1)\n        self.avg_factor_with_neg = avg_factor_with_neg\n        self.avg_factor = self.num_pos + self.num_neg \\\n            if avg_factor_with_neg else self.num_pos\n        self.pos_priors = priors[pos_inds]\n        self.neg_priors = priors[neg_inds]\n        self.pos_is_gt = gt_flags[pos_inds]\n\n        self.num_gts = gt_bboxes.shape[0]\n        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n        self.pos_gt_labels = assign_result.labels[pos_inds]\n        box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4\n        if gt_bboxes.numel() == 0:\n            # hack for index error case\n            assert self.pos_assigned_gt_inds.numel() == 0\n            self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim)\n        else:\n            if len(gt_bboxes.shape) < 2:\n                gt_bboxes = gt_bboxes.view(-1, box_dim)\n            self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()]\n\n    @property\n    def priors(self):\n        \"\"\"torch.Tensor: concatenated positive and negative priors\"\"\"\n        return cat_boxes([self.pos_priors, self.neg_priors])\n\n    @property\n    def bboxes(self):\n        \"\"\"torch.Tensor: concatenated positive and negative boxes\"\"\"\n        warnings.warn('DeprecationWarning: bboxes is deprecated, '\n                      'please use \"priors\" instead')\n        return self.priors\n\n    @property\n    def pos_bboxes(self):\n        warnings.warn('DeprecationWarning: pos_bboxes is deprecated, '\n                      'please use \"pos_priors\" instead')\n        return self.pos_priors\n\n    @property\n    def neg_bboxes(self):\n        warnings.warn('DeprecationWarning: neg_bboxes is deprecated, '\n                      'please use \"neg_priors\" instead')\n        return self.neg_priors\n\n    def to(self, device):\n        \"\"\"Change the device of the data inplace.\n\n        Example:\n            >>> self = SamplingResult.random()\n            >>> print(f'self = {self.to(None)}')\n            >>> # xdoctest: +REQUIRES(--gpu)\n            >>> print(f'self = {self.to(0)}')\n        \"\"\"\n        _dict = self.__dict__\n        for key, value in _dict.items():\n            if isinstance(value, (torch.Tensor, BaseBoxes)):\n                _dict[key] = 
value.to(device)\n        return self\n\n    def __nice__(self):\n        data = self.info.copy()\n        data['pos_priors'] = data.pop('pos_priors').shape\n        data['neg_priors'] = data.pop('neg_priors').shape\n        parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n        body = '    ' + ',\\n    '.join(parts)\n        return '{\\n' + body + '\\n}'\n\n    @property\n    def info(self):\n        \"\"\"Returns a dictionary of info about the object.\"\"\"\n        return {\n            'pos_inds': self.pos_inds,\n            'neg_inds': self.neg_inds,\n            'pos_priors': self.pos_priors,\n            'neg_priors': self.neg_priors,\n            'pos_is_gt': self.pos_is_gt,\n            'num_gts': self.num_gts,\n            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n            'num_pos': self.num_pos,\n            'num_neg': self.num_neg,\n            'avg_factor': self.avg_factor\n        }\n\n    @classmethod\n    def random(cls, rng=None, **kwargs):\n        \"\"\"\n        Args:\n            rng (None | int | numpy.random.RandomState): seed or state.\n            kwargs (keyword arguments):\n                - num_preds: Number of predicted boxes.\n                - num_gts: Number of true boxes.\n                - p_ignore (float): Probability of a predicted box assigned to\n                    an ignored truth.\n                - p_assigned (float): probability of a predicted box not being\n                    assigned.\n\n        Returns:\n            :obj:`SamplingResult`: Randomly generated sampling result.\n\n        Example:\n            >>> from mmdet.models.task_modules.samplers.sampling_result import *  # NOQA\n            >>> self = SamplingResult.random()\n            >>> print(self.__dict__)\n        \"\"\"\n        from mmengine.structures import InstanceData\n\n        from mmdet.models.task_modules.assigners import AssignResult\n        from mmdet.models.task_modules.samplers import RandomSampler\n        rng = ensure_rng(rng)\n\n        # make probabilistic?\n        num = 32\n        pos_fraction = 0.5\n        neg_pos_ub = -1\n\n        assign_result = AssignResult.random(rng=rng, **kwargs)\n\n        # Note we could just compute an assignment\n        priors = random_boxes(assign_result.num_preds, rng=rng)\n        gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)\n        gt_labels = torch.randint(\n            0, 5, (assign_result.num_gts, ), dtype=torch.long)\n\n        pred_instances = InstanceData()\n        pred_instances.priors = priors\n\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n\n        add_gt_as_proposals = True\n\n        sampler = RandomSampler(\n            num,\n            pos_fraction,\n            neg_pos_ub=neg_pos_ub,\n            add_gt_as_proposals=add_gt_as_proposals,\n            rng=rng)\n        self = sampler.sample(\n            assign_result=assign_result,\n            pred_instances=pred_instances,\n            gt_instances=gt_instances)\n        return self\n"
  },
  {
    "path": "mmdet/models/task_modules/samplers/score_hlr_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Union\n\nimport torch\nfrom mmcv.ops import nms_match\nfrom mmengine.structures import InstanceData\nfrom numpy import ndarray\nfrom torch import Tensor\n\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import bbox2roi\nfrom ..assigners import AssignResult\nfrom .base_sampler import BaseSampler\nfrom .sampling_result import SamplingResult\n\n\n@TASK_UTILS.register_module()\nclass ScoreHLRSampler(BaseSampler):\n    r\"\"\"Importance-based Sample Reweighting (ISR_N), described in `Prime Sample\n    Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.\n\n    Score hierarchical local rank (HLR) differentiates with RandomSampler in\n    negative part. It firstly computes Score-HLR in a two-step way,\n    then linearly maps score hlr to the loss weights.\n\n    Args:\n        num (int): Total number of sampled RoIs.\n        pos_fraction (float): Fraction of positive samples.\n        context (:obj:`BaseRoIHead`): RoI head that the sampler belongs to.\n        neg_pos_ub (int): Upper bound of the ratio of num negative to num\n            positive, -1 means no upper bound. Defaults to -1.\n        add_gt_as_proposals (bool): Whether to add ground truth as proposals.\n            Defaults to True.\n        k (float): Power of the non-linear mapping. Defaults to 0.5\n        bias (float): Shift of the non-linear mapping. Defaults to 0.\n        score_thr (float): Minimum score that a negative sample is to be\n            considered as valid bbox. Defaults to 0.05.\n        iou_thr (float): IoU threshold for NMS match. Defaults to 0.5.\n    \"\"\"\n\n    def __init__(self,\n                 num: int,\n                 pos_fraction: float,\n                 context,\n                 neg_pos_ub: int = -1,\n                 add_gt_as_proposals: bool = True,\n                 k: float = 0.5,\n                 bias: float = 0,\n                 score_thr: float = 0.05,\n                 iou_thr: float = 0.5,\n                 **kwargs) -> None:\n        super().__init__(\n            num=num,\n            pos_fraction=pos_fraction,\n            neg_pos_ub=neg_pos_ub,\n            add_gt_as_proposals=add_gt_as_proposals)\n        self.k = k\n        self.bias = bias\n        self.score_thr = score_thr\n        self.iou_thr = iou_thr\n        self.context = context\n        # context of cascade detectors is a list, so distinguish them here.\n        if not hasattr(context, 'num_stages'):\n            self.bbox_roi_extractor = context.bbox_roi_extractor\n            self.bbox_head = context.bbox_head\n            self.with_shared_head = context.with_shared_head\n            if self.with_shared_head:\n                self.shared_head = context.shared_head\n        else:\n            self.bbox_roi_extractor = context.bbox_roi_extractor[\n                context.current_stage]\n            self.bbox_head = context.bbox_head[context.current_stage]\n\n    @staticmethod\n    def random_choice(gallery: Union[Tensor, ndarray, list],\n                      num: int) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly select some elements from the gallery.\n\n        If `gallery` is a Tensor, the returned indices will be a Tensor;\n        If `gallery` is a ndarray or list, the returned indices will be a\n        ndarray.\n\n        Args:\n            gallery (Tensor or ndarray or list): indices pool.\n            num (int): expected sample num.\n\n        Returns:\n            Tensor or ndarray: 
sampled indices.\n        \"\"\"\n        assert len(gallery) >= num\n\n        is_tensor = isinstance(gallery, torch.Tensor)\n        if not is_tensor:\n            if torch.cuda.is_available():\n                device = torch.cuda.current_device()\n            else:\n                device = 'cpu'\n            gallery = torch.tensor(gallery, dtype=torch.long, device=device)\n        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]\n        rand_inds = gallery[perm]\n        if not is_tensor:\n            rand_inds = rand_inds.cpu().numpy()\n        return rand_inds\n\n    def _sample_pos(self, assign_result: AssignResult, num_expected: int,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Randomly sample some positive samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.random_choice(pos_inds, num_expected)\n\n    def _sample_neg(self, assign_result: AssignResult, num_expected: int,\n                    bboxes: Tensor, feats: Tensor,\n                    **kwargs) -> Union[Tensor, ndarray]:\n        \"\"\"Sample negative samples.\n\n        Score-HLR sampler is done in the following steps:\n        1. Take the maximum positive score prediction of each negative samples\n            as s_i.\n        2. Filter out negative samples whose s_i <= score_thr, the left samples\n            are called valid samples.\n        3. Use NMS-Match to divide valid samples into different groups,\n            samples in the same group will greatly overlap with each other\n        4. Rank the matched samples in two-steps to get Score-HLR.\n            (1) In the same group, rank samples with their scores.\n            (2) In the same score rank across different groups,\n                rank samples with their scores again.\n        5. 
Linearly map Score-HLR to the final label weights.\n\n        Args:\n            assign_result (:obj:`AssignResult`): result of assigner.\n            num_expected (int): Expected number of samples.\n            bboxes (Tensor): bbox to be sampled.\n            feats (Tensor): Features come from FPN.\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()\n        num_neg = neg_inds.size(0)\n        if num_neg == 0:\n            return neg_inds, None\n        with torch.no_grad():\n            neg_bboxes = bboxes[neg_inds]\n            neg_rois = bbox2roi([neg_bboxes])\n            bbox_result = self.context._bbox_forward(feats, neg_rois)\n            cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[\n                'bbox_pred']\n\n            ori_loss = self.bbox_head.loss(\n                cls_score=cls_score,\n                bbox_pred=None,\n                rois=None,\n                labels=neg_inds.new_full((num_neg, ),\n                                         self.bbox_head.num_classes),\n                label_weights=cls_score.new_ones(num_neg),\n                bbox_targets=None,\n                bbox_weights=None,\n                reduction_override='none')['loss_cls']\n\n            # filter out samples with the max score lower than score_thr\n            max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)\n            valid_inds = (max_score > self.score_thr).nonzero().view(-1)\n            invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)\n            num_valid = valid_inds.size(0)\n            num_invalid = invalid_inds.size(0)\n\n            num_expected = min(num_neg, num_expected)\n            num_hlr = min(num_valid, num_expected)\n            num_rand = num_expected - num_hlr\n            if num_valid > 0:\n                valid_rois = neg_rois[valid_inds]\n                valid_max_score = max_score[valid_inds]\n                valid_argmax_score = argmax_score[valid_inds]\n                valid_bbox_pred = bbox_pred[valid_inds]\n\n                # valid_bbox_pred shape: [num_valid, #num_classes, 4]\n                valid_bbox_pred = valid_bbox_pred.view(\n                    valid_bbox_pred.size(0), -1, 4)\n                selected_bbox_pred = valid_bbox_pred[range(num_valid),\n                                                     valid_argmax_score]\n                pred_bboxes = self.bbox_head.bbox_coder.decode(\n                    valid_rois[:, 1:], selected_bbox_pred)\n                pred_bboxes_with_score = torch.cat(\n                    [pred_bboxes, valid_max_score[:, None]], -1)\n                group = nms_match(pred_bboxes_with_score, self.iou_thr)\n\n                # imp: importance\n                imp = cls_score.new_zeros(num_valid)\n                for g in group:\n                    g_score = valid_max_score[g]\n                    # g_score has already sorted\n                    rank = g_score.new_tensor(range(g_score.size(0)))\n                    imp[g] = num_valid - rank + g_score\n                _, imp_rank_inds = imp.sort(descending=True)\n                _, imp_rank = imp_rank_inds.sort()\n                hlr_inds = imp_rank_inds[:num_expected]\n\n                if num_rand > 0:\n                    rand_inds = torch.randperm(num_invalid)[:num_rand]\n                    select_inds = torch.cat(\n                        [valid_inds[hlr_inds], invalid_inds[rand_inds]])\n                else:\n          
          select_inds = valid_inds[hlr_inds]\n\n                neg_label_weights = cls_score.new_ones(num_expected)\n\n                up_bound = max(num_expected, num_valid)\n                imp_weights = (up_bound -\n                               imp_rank[hlr_inds].float()) / up_bound\n                neg_label_weights[:num_hlr] = imp_weights\n                neg_label_weights[num_hlr:] = imp_weights.min()\n                neg_label_weights = (self.bias +\n                                     (1 - self.bias) * neg_label_weights).pow(\n                                         self.k)\n                ori_selected_loss = ori_loss[select_inds]\n                new_loss = ori_selected_loss * neg_label_weights\n                norm_ratio = ori_selected_loss.sum() / new_loss.sum()\n                neg_label_weights *= norm_ratio\n            else:\n                neg_label_weights = cls_score.new_ones(num_expected)\n                select_inds = torch.randperm(num_neg)[:num_expected]\n\n            return neg_inds[select_inds], neg_label_weights\n\n    def sample(self, assign_result: AssignResult, pred_instances: InstanceData,\n               gt_instances: InstanceData, **kwargs) -> SamplingResult:\n        \"\"\"Sample positive and negative bboxes.\n\n        This is a simple implementation of bbox sampling given candidates,\n        assigning results and ground truth bboxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigning results.\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. 
It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n\n        Returns:\n            :obj:`SamplingResult`: Sampling result.\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n\n        gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)\n        if self.add_gt_as_proposals and len(gt_bboxes) > 0:\n            priors = torch.cat([gt_bboxes, priors], dim=0)\n            assign_result.add_gt_(gt_labels)\n            gt_ones = priors.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n            gt_flags = torch.cat([gt_ones, gt_flags])\n\n        num_expected_pos = int(self.num * self.pos_fraction)\n        pos_inds = self.pos_sampler._sample_pos(\n            assign_result, num_expected_pos, bboxes=priors, **kwargs)\n        num_sampled_pos = pos_inds.numel()\n        num_expected_neg = self.num - num_sampled_pos\n        if self.neg_pos_ub >= 0:\n            _pos = max(1, num_sampled_pos)\n            neg_upper_bound = int(self.neg_pos_ub * _pos)\n            if num_expected_neg > neg_upper_bound:\n                num_expected_neg = neg_upper_bound\n        neg_inds, neg_label_weights = self.neg_sampler._sample_neg(\n            assign_result, num_expected_neg, bboxes=priors, **kwargs)\n\n        sampling_result = SamplingResult(\n            pos_inds=pos_inds,\n            neg_inds=neg_inds,\n            priors=priors,\n            gt_bboxes=gt_bboxes,\n            assign_result=assign_result,\n            gt_flags=gt_flags)\n        return sampling_result, neg_label_weights\n"
  },
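The final weighting step of ``ScoreHLRSampler._sample_neg`` maps the Score-HLR rank linearly into ``(0, 1]`` and then through the ``(bias, k)`` non-linear mapping, so the hardest valid negatives keep the largest label weights. A toy re-computation of just that mapping (rank values and hyper-parameters are illustrative, not taken from any config):

```python
# Hedged toy sketch of the Score-HLR weight mapping used in _sample_neg.
import torch

num_valid, up_bound = 6, 6  # illustrative counts
bias, k = 0.0, 0.5          # defaults of ScoreHLRSampler

imp_rank = torch.arange(num_valid).float()      # rank 0 = highest Score-HLR
weights = (up_bound - imp_rank) / up_bound      # linear map into (0, 1]
weights = (bias + (1 - bias) * weights).pow(k)  # non-linear (bias, k) mapping
print(weights)  # monotonically decreasing: hardest negatives weigh the most
```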
  {
    "path": "mmdet/models/test_time_augs/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .det_tta import DetTTAModel\nfrom .merge_augs import (merge_aug_bboxes, merge_aug_masks,\n                         merge_aug_proposals, merge_aug_results,\n                         merge_aug_scores)\n\n__all__ = [\n    'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_proposals',\n    'merge_aug_scores', 'merge_aug_results', 'DetTTAModel'\n]\n"
  },
  {
    "path": "mmdet/models/test_time_augs/det_tta.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple\n\nimport torch\nfrom mmcv.ops import batched_nms\nfrom mmengine.model import BaseTTAModel\nfrom mmengine.registry import MODELS\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.bbox import bbox_flip\n\n\n@MODELS.register_module()\nclass DetTTAModel(BaseTTAModel):\n    \"\"\"Merge augmented detection results, only bboxes corresponding score under\n    flipping and multi-scale resizing can be processed now.\n\n    Examples:\n        >>> tta_model = dict(\n        >>>     type='DetTTAModel',\n        >>>     tta_cfg=dict(nms=dict(\n        >>>                     type='nms',\n        >>>                     iou_threshold=0.5),\n        >>>                     max_per_img=100))\n        >>>\n        >>> tta_pipeline = [\n        >>>     dict(type='LoadImageFromFile',\n        >>>          file_client_args=dict(backend='disk')),\n        >>>     dict(\n        >>>         type='TestTimeAug',\n        >>>         transforms=[[\n        >>>             dict(type='Resize',\n        >>>                  scale=(1333, 800),\n        >>>                  keep_ratio=True),\n        >>>         ], [\n        >>>             dict(type='RandomFlip', prob=1.),\n        >>>             dict(type='RandomFlip', prob=0.)\n        >>>         ], [\n        >>>             dict(\n        >>>                 type='PackDetInputs',\n        >>>                 meta_keys=('img_id', 'img_path', 'ori_shape',\n        >>>                         'img_shape', 'scale_factor', 'flip',\n        >>>                         'flip_direction'))\n        >>>         ]])]\n    \"\"\"\n\n    def __init__(self, tta_cfg=None, **kwargs):\n        super().__init__(**kwargs)\n        self.tta_cfg = tta_cfg\n\n    def merge_aug_bboxes(self, aug_bboxes: List[Tensor],\n                         aug_scores: List[Tensor],\n                         img_metas: List[str]) -> Tuple[Tensor, Tensor]:\n        \"\"\"Merge augmented detection bboxes and scores.\n\n        Args:\n            aug_bboxes (list[Tensor]): shape (n, 4*#class)\n            aug_scores (list[Tensor] or None): shape (n, #class)\n        Returns:\n            tuple[Tensor]: ``bboxes`` with shape (n,4), where\n            4 represent (tl_x, tl_y, br_x, br_y)\n            and ``scores`` with shape (n,).\n        \"\"\"\n        recovered_bboxes = []\n        for bboxes, img_info in zip(aug_bboxes, img_metas):\n            ori_shape = img_info['ori_shape']\n            flip = img_info['flip']\n            flip_direction = img_info['flip_direction']\n            if flip:\n                bboxes = bbox_flip(\n                    bboxes=bboxes,\n                    img_shape=ori_shape,\n                    direction=flip_direction)\n            recovered_bboxes.append(bboxes)\n        bboxes = torch.cat(recovered_bboxes, dim=0)\n        if aug_scores is None:\n            return bboxes\n        else:\n            scores = torch.cat(aug_scores, dim=0)\n            return bboxes, scores\n\n    def merge_preds(self, data_samples_list: List[List[DetDataSample]]):\n        \"\"\"Merge batch predictions of enhanced data.\n\n        Args:\n            data_samples_list (List[List[DetDataSample]]): List of predictions\n                of all enhanced data. 
The outer list indicates images, and the\n                inner list corresponds to the different views of one image.\n                Each element of the inner list is a ``DetDataSample``.\n        Returns:\n            List[DetDataSample]: Merged batch prediction.\n        \"\"\"\n        merged_data_samples = []\n        for data_samples in data_samples_list:\n            merged_data_samples.append(self._merge_single_sample(data_samples))\n        return merged_data_samples\n\n    def _merge_single_sample(\n            self, data_samples: List[DetDataSample]) -> DetDataSample:\n        \"\"\"Merge predictions which come from the different views of one image\n        into one prediction.\n\n        Args:\n            data_samples (List[DetDataSample]): List of predictions\n            of enhanced data which come from one image.\n        Returns:\n            DetDataSample: Merged prediction.\n        \"\"\"\n        aug_bboxes = []\n        aug_scores = []\n        aug_labels = []\n        img_metas = []\n        # TODO: support instance segmentation TTA\n        assert data_samples[0].pred_instances.get('masks', None) is None, \\\n            'TTA for instance segmentation is not supported yet.'\n        for data_sample in data_samples:\n            aug_bboxes.append(data_sample.pred_instances.bboxes)\n            aug_scores.append(data_sample.pred_instances.scores)\n            aug_labels.append(data_sample.pred_instances.labels)\n            img_metas.append(data_sample.metainfo)\n\n        merged_bboxes, merged_scores = self.merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas)\n        merged_labels = torch.cat(aug_labels, dim=0)\n\n        if merged_bboxes.numel() == 0:\n            return data_samples[0]\n\n        det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,\n                                            merged_labels, self.tta_cfg.nms)\n\n        det_bboxes = det_bboxes[:self.tta_cfg.max_per_img]\n        det_labels = merged_labels[keep_idxs][:self.tta_cfg.max_per_img]\n\n        results = InstanceData()\n        _det_bboxes = det_bboxes.clone()\n        results.bboxes = _det_bboxes[:, :-1]\n        results.scores = _det_bboxes[:, -1]\n        results.labels = det_labels\n        det_results = data_samples[0]\n        det_results.pred_instances = results\n        return det_results\n"
  },
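``DetTTAModel.merge_aug_bboxes`` relies on ``bbox_flip`` being its own inverse: predictions from a flipped view are flipped back before all views are concatenated and passed to NMS. A round-trip sketch, assuming ``bbox_flip`` keeps the ``(bboxes, img_shape, direction)`` signature it is called with above (values are illustrative):

```python
# Hedged sketch, not part of the repo: flipping twice restores the boxes.
import torch

from mmdet.structures.bbox import bbox_flip

bboxes = torch.tensor([[10., 20., 50., 60.]])
img_shape = (100, 200)  # (h, w), illustrative

flipped = bbox_flip(bboxes=bboxes, img_shape=img_shape, direction='horizontal')
restored = bbox_flip(bboxes=flipped, img_shape=img_shape, direction='horizontal')
assert torch.allclose(restored, bboxes)
```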
  {
    "path": "mmdet/models/test_time_augs/merge_augs.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport torch\nfrom mmcv.ops import nms\nfrom mmengine.config import ConfigDict\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import bbox_mapping_back\n\n\n# TODO remove this, never be used in mmdet\ndef merge_aug_proposals(aug_proposals, img_metas, cfg):\n    \"\"\"Merge augmented proposals (multiscale, flip, etc.)\n\n    Args:\n        aug_proposals (list[Tensor]): proposals from different testing\n            schemes, shape (n, 5). Note that they are not rescaled to the\n            original image size.\n\n        img_metas (list[dict]): list of image info dict where each dict has:\n            'img_shape', 'scale_factor', 'flip', and may also contain\n            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n            For details on the values of these keys see\n            `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n        cfg (dict): rpn test config.\n\n    Returns:\n        Tensor: shape (n, 4), proposals corresponding to original image scale.\n    \"\"\"\n\n    cfg = copy.deepcopy(cfg)\n\n    # deprecate arguments warning\n    if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:\n        warnings.warn(\n            'In rpn_proposal or test_cfg, '\n            'nms_thr has been moved to a dict named nms as '\n            'iou_threshold, max_num has been renamed as max_per_img, '\n            'name of original arguments and the way to specify '\n            'iou_threshold of NMS will be deprecated.')\n    if 'nms' not in cfg:\n        cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))\n    if 'max_num' in cfg:\n        if 'max_per_img' in cfg:\n            assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \\\n                f'max_per_img at the same time, but get {cfg.max_num} ' \\\n                f'and {cfg.max_per_img} respectively' \\\n                f'Please delete max_num which will be deprecated.'\n        else:\n            cfg.max_per_img = cfg.max_num\n    if 'nms_thr' in cfg:\n        assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \\\n            f'iou_threshold in nms and ' \\\n            f'nms_thr at the same time, but get ' \\\n            f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \\\n            f' respectively. 
Please delete the nms_thr ' \\\n            f'which will be deprecated.'\n\n    recovered_proposals = []\n    for proposals, img_info in zip(aug_proposals, img_metas):\n        img_shape = img_info['img_shape']\n        scale_factor = img_info['scale_factor']\n        flip = img_info['flip']\n        flip_direction = img_info['flip_direction']\n        _proposals = proposals.clone()\n        _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,\n                                              scale_factor, flip,\n                                              flip_direction)\n        recovered_proposals.append(_proposals)\n    aug_proposals = torch.cat(recovered_proposals, dim=0)\n    merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),\n                              aug_proposals[:, -1].contiguous(),\n                              cfg.nms.iou_threshold)\n    scores = merged_proposals[:, 4]\n    _, order = scores.sort(0, descending=True)\n    num = min(cfg.max_per_img, merged_proposals.shape[0])\n    order = order[:num]\n    merged_proposals = merged_proposals[order, :]\n    return merged_proposals\n\n\n# TODO remove this, never be used in mmdet\ndef merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):\n    \"\"\"Merge augmented detection bboxes and scores.\n\n    Args:\n        aug_bboxes (list[Tensor]): shape (n, 4*#class)\n        aug_scores (list[Tensor] or None): shape (n, #class)\n        img_metas (list[list[dict]]): Image information for each augmentation.\n        rcnn_test_cfg (dict): rcnn test config.\n\n    Returns:\n        tuple: (bboxes, scores)\n    \"\"\"\n    recovered_bboxes = []\n    for bboxes, img_info in zip(aug_bboxes, img_metas):\n        img_shape = img_info[0]['img_shape']\n        scale_factor = img_info[0]['scale_factor']\n        flip = img_info[0]['flip']\n        flip_direction = img_info[0]['flip_direction']\n        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,\n                                   flip_direction)\n        recovered_bboxes.append(bboxes)\n    bboxes = torch.stack(recovered_bboxes).mean(dim=0)\n    if aug_scores is None:\n        return bboxes\n    else:\n        scores = torch.stack(aug_scores).mean(dim=0)\n        return bboxes, scores\n\n\ndef merge_aug_results(aug_batch_results, aug_batch_img_metas):\n    \"\"\"Merge augmented detection results. Only bboxes and their corresponding\n    scores under flipping and multi-scale resizing can be processed now.\n\n    Args:\n        aug_batch_results (list[list[:obj:`InstanceData`]]):\n            Detection results of multiple images with\n            different augmentations.\n            The outer list indicates the augmentations. The inner\n            list indicates the batch dimension.\n            Each item usually contains the following keys.\n\n            - scores (Tensor): Classification scores, in shape\n              (num_instances, )\n            - labels (Tensor): Labels of bboxes, in shape\n              (num_instances,).\n            - bboxes (Tensor): In shape (num_instances, 4),\n              the last dimension 4 arranged as (x1, y1, x2, y2).\n        aug_batch_img_metas (list[list[dict]]): The outer list\n            indicates test-time augs (multiscale, flip, etc.)\n            and the inner list indicates\n            images in a batch. 
Each dict in the list contains\n            information about an image in the batch.\n\n    Returns:\n        batch_results (list[:obj:`InstanceData`]): Same as\n        the input `aug_results` except that all bboxes have\n        been mapped to the original scale.\n    \"\"\"\n    num_augs = len(aug_batch_results)\n    num_imgs = len(aug_batch_results[0])\n\n    batch_results = []\n    aug_batch_results = copy.deepcopy(aug_batch_results)\n    for img_id in range(num_imgs):\n        aug_results = []\n        for aug_id in range(num_augs):\n            img_metas = aug_batch_img_metas[aug_id][img_id]\n            results = aug_batch_results[aug_id][img_id]\n\n            img_shape = img_metas['img_shape']\n            scale_factor = img_metas['scale_factor']\n            flip = img_metas['flip']\n            flip_direction = img_metas['flip_direction']\n            bboxes = bbox_mapping_back(results.bboxes, img_shape, scale_factor,\n                                       flip, flip_direction)\n            results.bboxes = bboxes\n            aug_results.append(results)\n        merged_aug_results = results.cat(aug_results)\n        batch_results.append(merged_aug_results)\n\n    return batch_results\n\n\ndef merge_aug_scores(aug_scores):\n    \"\"\"Merge augmented bbox scores.\"\"\"\n    if isinstance(aug_scores[0], torch.Tensor):\n        return torch.mean(torch.stack(aug_scores), dim=0)\n    else:\n        return np.mean(aug_scores, axis=0)\n\n\ndef merge_aug_masks(aug_masks: List[Tensor],\n                    img_metas: dict,\n                    weights: Optional[Union[list, Tensor]] = None) -> Tensor:\n    \"\"\"Merge augmented mask prediction.\n\n    Args:\n        aug_masks (list[Tensor]): each has shape\n            (n, c, h, w).\n        img_metas (dict): Image information.\n        weights (list or Tensor): Weight of each aug_masks,\n            the length should be the same as ``aug_masks``.\n\n    Returns:\n        Tensor: has shape (n, c, h, w)\n    \"\"\"\n    recovered_masks = []\n    for i, mask in enumerate(aug_masks):\n        if weights is not None:\n            assert len(weights) == len(aug_masks)\n            weight = weights[i]\n        else:\n            weight = 1\n        flip = img_metas.get('flip', False)\n        if flip:\n            flip_direction = img_metas['flip_direction']\n            if flip_direction == 'horizontal':\n                mask = mask[:, :, :, ::-1]\n            elif flip_direction == 'vertical':\n                mask = mask[:, :, ::-1, :]\n            elif flip_direction == 'diagonal':\n                mask = mask[:, :, :, ::-1]\n                mask = mask[:, :, ::-1, :]\n            else:\n                raise ValueError(\n                    f\"Invalid flipping direction '{flip_direction}'\")\n        recovered_masks.append(mask[None, :] * weight)\n\n    merged_masks = torch.cat(recovered_masks, 0).mean(dim=0)\n    if weights is not None:\n        merged_masks = merged_masks * len(weights) / sum(weights)\n    return merged_masks\n"
  },
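  {
    "path": "examples/merge_augs_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# It shows how the TTA merge helpers defined in merge_augs.py combine\n# per-augmentation predictions. The import path below is assumed from the\n# file location and may need adjusting.\nimport torch\n\nfrom mmdet.models.test_time_augs.merge_augs import (merge_aug_masks,\n                                                    merge_aug_scores)\n\n# Two augmented score predictions for the same three boxes.\naug_scores = [torch.tensor([0.9, 0.4, 0.1]), torch.tensor([0.7, 0.6, 0.3])]\nprint(merge_aug_scores(aug_scores))  # element-wise mean -> [0.8, 0.5, 0.2]\n\n# Two augmented mask logits of shape (n, c, h, w); no flip was recorded,\n# so merging reduces to a (weighted) average over augmentations.\naug_masks = [torch.zeros(2, 1, 4, 4), torch.ones(2, 1, 4, 4)]\nimg_metas = dict(flip=False)\nmerged = merge_aug_masks(aug_masks, img_metas, weights=[1, 1])\nprint(merged.shape)  # torch.Size([2, 1, 4, 4]), every value is 0.5\n"
  },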
  {
    "path": "mmdet/models/utils/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .gaussian_target import (gather_feat, gaussian_radius,\n                              gen_gaussian_target, get_local_maximum,\n                              get_topk_from_heatmap, transpose_and_gather_feat)\nfrom .make_divisible import make_divisible\nfrom .misc import (aligned_bilinear, center_of_mass, empty_instances,\n                   filter_gt_instances, filter_scores_and_topk, flip_tensor,\n                   generate_coordinate, images_to_levels, interpolate_as,\n                   levels_to_images, mask2ndarray, multi_apply,\n                   relative_coordinate_maps, rename_loss_dict,\n                   reweight_loss_dict, samplelist_boxtype2tensor,\n                   select_single_mlvl, sigmoid_geometric_mean,\n                   unfold_wo_center, unmap, unpack_gt_instances)\nfrom .panoptic_gt_processing import preprocess_panoptic_gt\nfrom .point_sample import (get_uncertain_point_coords_with_randomness,\n                           get_uncertainty)\n\n__all__ = [\n    'gaussian_radius', 'gen_gaussian_target', 'make_divisible',\n    'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',\n    'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',\n    'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',\n    'get_uncertainty', 'unpack_gt_instances', 'empty_instances',\n    'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',\n    'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',\n    'select_single_mlvl', 'unmap', 'images_to_levels',\n    'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',\n    'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',\n    'unfold_wo_center'\n]\n"
  },
  {
    "path": "mmdet/models/utils/gaussian_target.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import sqrt\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):\n    \"\"\"Generate 2D gaussian kernel.\n\n    Args:\n        radius (int): Radius of gaussian kernel.\n        sigma (int): Sigma of gaussian function. Default: 1.\n        dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.\n        device (str): Device of gaussian tensor. Default: 'cpu'.\n\n    Returns:\n        h (Tensor): Gaussian kernel with a\n            ``(2 * radius + 1) * (2 * radius + 1)`` shape.\n    \"\"\"\n    x = torch.arange(\n        -radius, radius + 1, dtype=dtype, device=device).view(1, -1)\n    y = torch.arange(\n        -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)\n\n    h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()\n\n    h[h < torch.finfo(h.dtype).eps * h.max()] = 0\n    return h\n\n\ndef gen_gaussian_target(heatmap, center, radius, k=1):\n    \"\"\"Generate 2D gaussian heatmap.\n\n    Args:\n        heatmap (Tensor): Input heatmap, the gaussian kernel will cover on\n            it and maintain the max value.\n        center (list[int]): Coord of gaussian kernel's center.\n        radius (int): Radius of gaussian kernel.\n        k (int): Coefficient of gaussian kernel. Default: 1.\n\n    Returns:\n        out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.\n    \"\"\"\n    diameter = 2 * radius + 1\n    gaussian_kernel = gaussian2D(\n        radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)\n\n    x, y = center\n\n    height, width = heatmap.shape[:2]\n\n    left, right = min(x, radius), min(width - x, radius + 1)\n    top, bottom = min(y, radius), min(height - y, radius + 1)\n\n    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n    masked_gaussian = gaussian_kernel[radius - top:radius + bottom,\n                                      radius - left:radius + right]\n    out_heatmap = heatmap\n    torch.max(\n        masked_heatmap,\n        masked_gaussian * k,\n        out=out_heatmap[y - top:y + bottom, x - left:x + right])\n\n    return out_heatmap\n\n\ndef gaussian_radius(det_size, min_overlap):\n    r\"\"\"Generate 2D gaussian radius.\n\n    This function is modified from the `official github repo\n    <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/\n    utils.py#L65>`_.\n\n    Given ``min_overlap``, radius could computed by a quadratic equation\n    according to Vieta's formulas.\n\n    There are 3 cases for computing gaussian radius, details are following:\n\n    - Explanation of figure: ``lt`` and ``br`` indicates the left-top and\n      bottom-right corner of ground truth box. ``x`` indicates the\n      generated corner at the limited position when ``radius=r``.\n\n    - Case1: one corner is inside the gt box and the other is outside.\n\n    .. code:: text\n\n        |<   width   >|\n\n        lt-+----------+         -\n        |  |          |         ^\n        +--x----------+--+\n        |  |          |  |\n        |  |          |  |    height\n        |  | overlap  |  |\n        |  |          |  |\n        |  |          |  |      v\n        +--+---------br--+      -\n           |          |  |\n           +----------+--x\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. 
math::\n        \\cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \\ge {iou} \\quad\\Rightarrow\\quad\n        {r^2-(w+h)r+\\cfrac{1-iou}{1+iou}*w*h} \\ge 0 \\\\\n        {a} = 1,\\quad{b} = {-(w+h)},\\quad{c} = {\\cfrac{1-iou}{1+iou}*w*h}\n        {r} \\le \\cfrac{-b-\\sqrt{b^2-4*a*c}}{2*a}\n\n    - Case2: both two corners are inside the gt box.\n\n    .. code:: text\n\n        |<   width   >|\n\n        lt-+----------+         -\n        |  |          |         ^\n        +--x-------+  |\n        |  |       |  |\n        |  |overlap|  |       height\n        |  |       |  |\n        |  +-------x--+\n        |          |  |         v\n        +----------+-br         -\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. math::\n        \\cfrac{(w-2*r)*(h-2*r)}{w*h} \\ge {iou} \\quad\\Rightarrow\\quad\n        {4r^2-2(w+h)r+(1-iou)*w*h} \\ge 0 \\\\\n        {a} = 4,\\quad {b} = {-2(w+h)},\\quad {c} = {(1-iou)*w*h}\n        {r} \\le \\cfrac{-b-\\sqrt{b^2-4*a*c}}{2*a}\n\n    - Case3: both two corners are outside the gt box.\n\n    .. code:: text\n\n           |<   width   >|\n\n        x--+----------------+\n        |  |                |\n        +-lt-------------+  |   -\n        |  |             |  |   ^\n        |  |             |  |\n        |  |   overlap   |  | height\n        |  |             |  |\n        |  |             |  |   v\n        |  +------------br--+   -\n        |                |  |\n        +----------------+--x\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. math::\n        \\cfrac{w*h}{(w+2*r)*(h+2*r)} \\ge {iou} \\quad\\Rightarrow\\quad\n        {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \\le 0 \\\\\n        {a} = {4*iou},\\quad {b} = {2*iou*(w+h)},\\quad {c} = {(iou-1)*w*h} \\\\\n        {r} \\le \\cfrac{-b+\\sqrt{b^2-4*a*c}}{2*a}\n\n    Args:\n        det_size (list[int]): Shape of object.\n        min_overlap (float): Min IoU with ground truth for boxes generated by\n            keypoints inside the gaussian kernel.\n\n    Returns:\n        radius (int): Radius of gaussian kernel.\n    \"\"\"\n    height, width = det_size\n\n    a1 = 1\n    b1 = (height + width)\n    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n    sq1 = sqrt(b1**2 - 4 * a1 * c1)\n    r1 = (b1 - sq1) / (2 * a1)\n\n    a2 = 4\n    b2 = 2 * (height + width)\n    c2 = (1 - min_overlap) * width * height\n    sq2 = sqrt(b2**2 - 4 * a2 * c2)\n    r2 = (b2 - sq2) / (2 * a2)\n\n    a3 = 4 * min_overlap\n    b3 = -2 * min_overlap * (height + width)\n    c3 = (min_overlap - 1) * width * height\n    sq3 = sqrt(b3**2 - 4 * a3 * c3)\n    r3 = (b3 + sq3) / (2 * a3)\n    return min(r1, r2, r3)\n\n\ndef get_local_maximum(heat, kernel=3):\n    \"\"\"Extract local maximum pixel with given kernel.\n\n    Args:\n        heat (Tensor): Target heatmap.\n        kernel (int): Kernel size of max pooling. Default: 3.\n\n    Returns:\n        heat (Tensor): A heatmap where local maximum pixels maintain its\n            own value and other positions are 0.\n    \"\"\"\n    pad = (kernel - 1) // 2\n    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n    keep = (hmax == heat).float()\n    return heat * keep\n\n\ndef get_topk_from_heatmap(scores, k=20):\n    \"\"\"Get top k positions from heatmap.\n\n    Args:\n        scores (Tensor): Target heatmap with shape\n            [batch, num_classes, height, width].\n        k (int): Target number. 
Default: 20.\n\n    Returns:\n        tuple[torch.Tensor]: Scores, indexes, categories and coords of\n            topk keypoint. Containing following Tensors:\n\n        - topk_scores (Tensor): Max scores of each topk keypoint.\n        - topk_inds (Tensor): Indexes of each topk keypoint.\n        - topk_clses (Tensor): Categories of each topk keypoint.\n        - topk_ys (Tensor): Y-coord of each topk keypoint.\n        - topk_xs (Tensor): X-coord of each topk keypoint.\n    \"\"\"\n    batch, _, height, width = scores.size()\n    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)\n    topk_clses = topk_inds // (height * width)\n    topk_inds = topk_inds % (height * width)\n    topk_ys = topk_inds // width\n    topk_xs = (topk_inds % width).int().float()\n    return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n\n\ndef gather_feat(feat, ind, mask=None):\n    \"\"\"Gather feature according to index.\n\n    Args:\n        feat (Tensor): Target feature map.\n        ind (Tensor): Target coord index.\n        mask (Tensor | None): Mask of feature map. Default: None.\n\n    Returns:\n        feat (Tensor): Gathered feature.\n    \"\"\"\n    dim = feat.size(2)\n    ind = ind.unsqueeze(2).repeat(1, 1, dim)\n    feat = feat.gather(1, ind)\n    if mask is not None:\n        mask = mask.unsqueeze(2).expand_as(feat)\n        feat = feat[mask]\n        feat = feat.view(-1, dim)\n    return feat\n\n\ndef transpose_and_gather_feat(feat, ind):\n    \"\"\"Transpose and gather feature according to index.\n\n    Args:\n        feat (Tensor): Target feature map.\n        ind (Tensor): Target coord index.\n\n    Returns:\n        feat (Tensor): Transposed and gathered feature.\n    \"\"\"\n    feat = feat.permute(0, 2, 3, 1).contiguous()\n    feat = feat.view(feat.size(0), -1, feat.size(3))\n    feat = gather_feat(feat, ind)\n    return feat\n"
  },
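  {
    "path": "examples/gaussian_target_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# It shows how gaussian_radius and gen_gaussian_target from\n# gaussian_target.py are typically combined to paint one CenterNet-style\n# gaussian peak onto a heatmap.\nimport torch\n\nfrom mmdet.models.utils import gaussian_radius, gen_gaussian_target\n\nheatmap = torch.zeros(128, 128)\n# Radius such that boxes generated from points inside the gaussian overlap\n# the (h=24, w=32) ground-truth box with IoU >= 0.7.\nradius = max(0, int(gaussian_radius((24, 32), min_overlap=0.7)))\nheatmap = gen_gaussian_target(heatmap, center=[64, 64], radius=radius)\nprint(heatmap.max())  # tensor(1.) at the box center\n"
  },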
  {
    "path": "mmdet/models/utils/make_divisible.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\ndef make_divisible(value, divisor, min_value=None, min_ratio=0.9):\n    \"\"\"Make divisible function.\n\n    This function rounds the channel number to the nearest value that can be\n    divisible by the divisor. It is taken from the original tf repo. It ensures\n    that all layers have a channel number that is divisible by divisor. It can\n    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py  # noqa\n\n    Args:\n        value (int): The original channel number.\n        divisor (int): The divisor to fully divide the channel number.\n        min_value (int): The minimum value of the output channel.\n            Default: None, means that the minimum value equal to the divisor.\n        min_ratio (float): The minimum ratio of the rounded channel number to\n            the original channel number. Default: 0.9.\n\n    Returns:\n        int: The modified output channel number.\n    \"\"\"\n\n    if min_value is None:\n        min_value = divisor\n    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)\n    # Make sure that round down does not go down by more than (1-min_ratio).\n    if new_value < min_ratio * value:\n        new_value += divisor\n    return new_value\n"
  },
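  {
    "path": "examples/make_divisible_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# make_divisible snaps a channel count to a multiple of the divisor while\n# never letting it drop below min_ratio of the original value.\nfrom mmdet.models.utils import make_divisible\n\nprint(make_divisible(37, 8))  # 40: rounded to the nearest multiple of 8\nprint(make_divisible(8, 6))   # 12: 6 would fall below 0.9 * 8, so one divisor is added\n"
  },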
  {
    "path": "mmdet/models/utils/misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\nfrom typing import List, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.utils import digit_version\nfrom six.moves import map, zip\nfrom torch import Tensor\nfrom torch.autograd import Function\nfrom torch.nn import functional as F\n\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\nfrom mmdet.utils import OptInstanceList\n\n\nclass SigmoidGeometricMean(Function):\n    \"\"\"Forward and backward function of geometric mean of two sigmoid\n    functions.\n\n    This implementation with analytical gradient function substitutes\n    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The\n    original implementation incurs none during gradient backprapagation\n    if both x and y are very small values.\n    \"\"\"\n\n    @staticmethod\n    def forward(ctx, x, y):\n        x_sigmoid = x.sigmoid()\n        y_sigmoid = y.sigmoid()\n        z = (x_sigmoid * y_sigmoid).sqrt()\n        ctx.save_for_backward(x_sigmoid, y_sigmoid, z)\n        return z\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        x_sigmoid, y_sigmoid, z = ctx.saved_tensors\n        grad_x = grad_output * z * (1 - x_sigmoid) / 2\n        grad_y = grad_output * z * (1 - y_sigmoid) / 2\n        return grad_x, grad_y\n\n\nsigmoid_geometric_mean = SigmoidGeometricMean.apply\n\n\ndef interpolate_as(source, target, mode='bilinear', align_corners=False):\n    \"\"\"Interpolate the `source` to the shape of the `target`.\n\n    The `source` must be a Tensor, but the `target` can be a Tensor or a\n    np.ndarray with the shape (..., target_h, target_w).\n\n    Args:\n        source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or\n            (N, C, H, W).\n        target (Tensor | np.ndarray): The interpolation target with the shape\n            (..., target_h, target_w).\n        mode (str): Algorithm used for interpolation. The options are the\n            same as those in F.interpolate(). Default: ``'bilinear'``.\n        align_corners (bool): The same as the argument in F.interpolate().\n\n    Returns:\n        Tensor: The interpolated source Tensor.\n    \"\"\"\n    assert len(target.shape) >= 2\n\n    def _interpolate_as(source, target, mode='bilinear', align_corners=False):\n        \"\"\"Interpolate the `source` (4D) to the shape of the `target`.\"\"\"\n        target_h, target_w = target.shape[-2:]\n        source_h, source_w = source.shape[-2:]\n        if target_h != source_h or target_w != source_w:\n            source = F.interpolate(\n                source,\n                size=(target_h, target_w),\n                mode=mode,\n                align_corners=align_corners)\n        return source\n\n    if len(source.shape) == 3:\n        source = source[:, None, :, :]\n        source = _interpolate_as(source, target, mode, align_corners)\n        return source[:, 0, :, :]\n    else:\n        return _interpolate_as(source, target, mode, align_corners)\n\n\ndef unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n    \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n    on ``batch_data_samples``\n\n    Args:\n        batch_data_samples (List[:obj:`DetDataSample`]): The Data\n            Samples. 
It usually includes information such as\n            `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n    Returns:\n        tuple:\n\n            - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. It usually includes ``bboxes`` and ``labels``\n                attributes.\n            - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n                Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n                data that is ignored during training and testing.\n                Defaults to None.\n            - batch_img_metas (list[dict]): Meta information of each image,\n                e.g., image size, scaling factor, etc.\n    \"\"\"\n    batch_gt_instances = []\n    batch_gt_instances_ignore = []\n    batch_img_metas = []\n    for data_sample in batch_data_samples:\n        batch_img_metas.append(data_sample.metainfo)\n        batch_gt_instances.append(data_sample.gt_instances)\n        if 'ignored_instances' in data_sample:\n            batch_gt_instances_ignore.append(data_sample.ignored_instances)\n        else:\n            batch_gt_instances_ignore.append(None)\n\n    return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas\n\n\ndef empty_instances(batch_img_metas: List[dict],\n                    device: torch.device,\n                    task_type: str,\n                    instance_results: OptInstanceList = None,\n                    mask_thr_binary: Union[int, float] = 0,\n                    box_type: Union[str, type] = 'hbox',\n                    use_box_type: bool = False,\n                    num_classes: int = 80,\n                    score_per_cls: bool = False) -> List[InstanceData]:\n    \"\"\"Handle predicted instances when RoI is empty.\n\n    Note: If ``instance_results`` is not None, it will be modified\n    in place internally, and then return ``instance_results``\n\n    Args:\n        batch_img_metas (list[dict]): List of image information.\n        device (torch.device): Device of tensor.\n        task_type (str): Expected returned task type. it currently\n            supports bbox and mask.\n        instance_results (list[:obj:`InstanceData`]): List of instance\n            results.\n        mask_thr_binary (int, float): mask binarization threshold.\n            Defaults to 0.\n        box_type (str or type): The empty box type. Defaults to `hbox`.\n        use_box_type (bool): Whether to warp boxes with the box type.\n            Defaults to False.\n        num_classes (int): num_classes of bbox_head. Defaults to 80.\n        score_per_cls (bool):  Whether to generate classwise score for\n            the empty instance. ``score_per_cls`` will be True when the model\n            needs to produce raw results without nms. 
Defaults to False.\n\n    Returns:\n        list[:obj:`InstanceData`]: Detection results of each image\n    \"\"\"\n    assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \\\n                                          f' but got {task_type}'\n\n    if instance_results is not None:\n        assert len(instance_results) == len(batch_img_metas)\n\n    results_list = []\n    for img_id in range(len(batch_img_metas)):\n        if instance_results is not None:\n            results = instance_results[img_id]\n            assert isinstance(results, InstanceData)\n        else:\n            results = InstanceData()\n\n        if task_type == 'bbox':\n            _, box_type = get_box_type(box_type)\n            bboxes = torch.zeros(0, box_type.box_dim, device=device)\n            if use_box_type:\n                bboxes = box_type(bboxes, clone=False)\n            results.bboxes = bboxes\n            score_shape = (0, num_classes + 1) if score_per_cls else (0, )\n            results.scores = torch.zeros(score_shape, device=device)\n            results.labels = torch.zeros((0, ),\n                                         device=device,\n                                         dtype=torch.long)\n        else:\n            # TODO: Handle the case where rescale is false\n            img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2]\n            # the type of `im_mask` will be torch.bool or torch.uint8,\n            # where uint8 if for visualization and debugging.\n            im_mask = torch.zeros(\n                0,\n                img_h,\n                img_w,\n                device=device,\n                dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8)\n            results.masks = im_mask\n        results_list.append(results)\n    return results_list\n\n\ndef multi_apply(func, *args, **kwargs):\n    \"\"\"Apply function to a list of arguments.\n\n    Note:\n        This function applies the ``func`` to multiple inputs and\n        map the multiple outputs of the ``func`` into different\n        list. 
Each list contains the same type of outputs corresponding\n        to different inputs.\n\n    Args:\n        func (Function): A function that will be applied to a list of\n            arguments\n\n    Returns:\n        tuple(list): A tuple containing multiple list, each list contains \\\n            a kind of returned results by the function\n    \"\"\"\n    pfunc = partial(func, **kwargs) if kwargs else func\n    map_results = map(pfunc, *args)\n    return tuple(map(list, zip(*map_results)))\n\n\ndef unmap(data, count, inds, fill=0):\n    \"\"\"Unmap a subset of item (data) back to the original set of items (of size\n    count)\"\"\"\n    if data.dim() == 1:\n        ret = data.new_full((count, ), fill)\n        ret[inds.type(torch.bool)] = data\n    else:\n        new_size = (count, ) + data.size()[1:]\n        ret = data.new_full(new_size, fill)\n        ret[inds.type(torch.bool), :] = data\n    return ret\n\n\ndef mask2ndarray(mask):\n    \"\"\"Convert Mask to ndarray..\n\n    Args:\n        mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or\n        torch.Tensor or np.ndarray): The mask to be converted.\n\n    Returns:\n        np.ndarray: Ndarray mask of shape (n, h, w) that has been converted\n    \"\"\"\n    if isinstance(mask, (BitmapMasks, PolygonMasks)):\n        mask = mask.to_ndarray()\n    elif isinstance(mask, torch.Tensor):\n        mask = mask.detach().cpu().numpy()\n    elif not isinstance(mask, np.ndarray):\n        raise TypeError(f'Unsupported {type(mask)} data type')\n    return mask\n\n\ndef flip_tensor(src_tensor, flip_direction):\n    \"\"\"flip tensor base on flip_direction.\n\n    Args:\n        src_tensor (Tensor): input feature map, shape (B, C, H, W).\n        flip_direction (str): The flipping direction. Options are\n          'horizontal', 'vertical', 'diagonal'.\n\n    Returns:\n        out_tensor (Tensor): Flipped tensor.\n    \"\"\"\n    assert src_tensor.ndim == 4\n    valid_directions = ['horizontal', 'vertical', 'diagonal']\n    assert flip_direction in valid_directions\n    if flip_direction == 'horizontal':\n        out_tensor = torch.flip(src_tensor, [3])\n    elif flip_direction == 'vertical':\n        out_tensor = torch.flip(src_tensor, [2])\n    else:\n        out_tensor = torch.flip(src_tensor, [2, 3])\n    return out_tensor\n\n\ndef select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n    \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n    tensor based on batch index.\n\n    Note: The default value of detach is True, because the proposal gradient\n    needs to be detached during the training of the two-stage model. E.g\n    Cascade Mask R-CNN.\n\n    Args:\n        mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n           each is a 4D-tensor.\n        batch_id (int): Batch index.\n        detach (bool): Whether detach gradient. 
Default True.\n\n    Returns:\n        list[Tensor]: Multi-scale single image tensor.\n    \"\"\"\n    assert isinstance(mlvl_tensors, (list, tuple))\n    num_levels = len(mlvl_tensors)\n\n    if detach:\n        mlvl_tensor_list = [\n            mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n        ]\n    else:\n        mlvl_tensor_list = [\n            mlvl_tensors[i][batch_id] for i in range(num_levels)\n        ]\n    return mlvl_tensor_list\n\n\ndef filter_scores_and_topk(scores, score_thr, topk, results=None):\n    \"\"\"Filter results using score threshold and topk candidates.\n\n    Args:\n        scores (Tensor): The scores, shape (num_bboxes, K).\n        score_thr (float): The score filter threshold.\n        topk (int): The number of topk candidates.\n        results (dict or list or Tensor, Optional): The results to\n           which the filtering rule is to be applied. The shape\n           of each item is (num_bboxes, N).\n\n    Returns:\n        tuple: Filtered results\n\n            - scores (Tensor): The scores after being filtered, \\\n                shape (num_bboxes_filtered, ).\n            - labels (Tensor): The class labels, shape \\\n                (num_bboxes_filtered, ).\n            - anchor_idxs (Tensor): The anchor indexes, shape \\\n                (num_bboxes_filtered, ).\n            - filtered_results (dict or list or Tensor, Optional): \\\n                The filtered results. The shape of each item is \\\n                (num_bboxes_filtered, N).\n    \"\"\"\n    valid_mask = scores > score_thr\n    scores = scores[valid_mask]\n    valid_idxs = torch.nonzero(valid_mask)\n\n    num_topk = min(topk, valid_idxs.size(0))\n    # torch.sort is actually faster than .topk (at least on GPUs)\n    scores, idxs = scores.sort(descending=True)\n    scores = scores[:num_topk]\n    topk_idxs = valid_idxs[idxs[:num_topk]]\n    keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n    filtered_results = None\n    if results is not None:\n        if isinstance(results, dict):\n            filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n        elif isinstance(results, list):\n            filtered_results = [result[keep_idxs] for result in results]\n        elif isinstance(results, torch.Tensor):\n            filtered_results = results[keep_idxs]\n        else:\n            raise NotImplementedError(f'Only supports dict or list or Tensor, '\n                                      f'but get {type(results)}.')\n    return scores, labels, keep_idxs, filtered_results\n\n\ndef center_of_mass(mask, esp=1e-6):\n    \"\"\"Calculate the centroid coordinates of the mask.\n\n    Args:\n        mask (Tensor): The mask to be calculated, shape (h, w).\n        esp (float): Avoid dividing by zero. 
Default: 1e-6.\n\n    Returns:\n        tuple[Tensor]: the coordinates of the center point of the mask.\n\n            - center_h (Tensor): the center point of the height.\n            - center_w (Tensor): the center point of the width.\n    \"\"\"\n    h, w = mask.shape\n    grid_h = torch.arange(h, device=mask.device)[:, None]\n    grid_w = torch.arange(w, device=mask.device)\n    normalizer = mask.sum().float().clamp(min=esp)\n    center_h = (mask * grid_h).sum() / normalizer\n    center_w = (mask * grid_w).sum() / normalizer\n    return center_h, center_w\n\n\ndef generate_coordinate(featmap_sizes, device='cuda'):\n    \"\"\"Generate the coordinate.\n\n    Args:\n        featmap_sizes (tuple): The feature to be calculated,\n            of shape (N, C, W, H).\n        device (str): The device where the feature will be put on.\n    Returns:\n        coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H).\n    \"\"\"\n\n    x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device)\n    y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device)\n    y, x = torch.meshgrid(y_range, x_range)\n    y = y.expand([featmap_sizes[0], 1, -1, -1])\n    x = x.expand([featmap_sizes[0], 1, -1, -1])\n    coord_feat = torch.cat([x, y], 1)\n\n    return coord_feat\n\n\ndef levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]:\n    \"\"\"Concat multi-level feature maps by image.\n\n    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]\n    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to\n    (N, H*W , C), then split the element to N elements with shape (H*W, C), and\n    concat elements in same image of all level along first dimension.\n\n    Args:\n        mlvl_tensor (list[Tensor]): list of Tensor which collect from\n            corresponding level. 
Each element is of shape (N, C, H, W)\n\n    Returns:\n        list[Tensor]: A list that contains N tensors and each tensor is\n            of shape (num_elements, C)\n    \"\"\"\n    batch_size = mlvl_tensor[0].size(0)\n    batch_list = [[] for _ in range(batch_size)]\n    channels = mlvl_tensor[0].size(1)\n    for t in mlvl_tensor:\n        t = t.permute(0, 2, 3, 1)\n        t = t.view(batch_size, -1, channels).contiguous()\n        for img in range(batch_size):\n            batch_list[img].append(t[img])\n    return [torch.cat(item, 0) for item in batch_list]\n\n\ndef images_to_levels(target, num_levels):\n    \"\"\"Convert targets by image to targets by feature level.\n\n    [target_img0, target_img1] -> [target_level0, target_level1, ...]\n    \"\"\"\n    target = stack_boxes(target, 0)\n    level_targets = []\n    start = 0\n    for n in num_levels:\n        end = start + n\n        # level_targets.append(target[:, start:end].squeeze(0))\n        level_targets.append(target[:, start:end])\n        start = end\n    return level_targets\n\n\ndef samplelist_boxtype2tensor(batch_data_samples: SampleList) -> SampleList:\n    for data_samples in batch_data_samples:\n        if 'gt_instances' in data_samples:\n            bboxes = data_samples.gt_instances.get('bboxes', None)\n            if isinstance(bboxes, BaseBoxes):\n                data_samples.gt_instances.bboxes = bboxes.tensor\n        if 'pred_instances' in data_samples:\n            bboxes = data_samples.pred_instances.get('bboxes', None)\n            if isinstance(bboxes, BaseBoxes):\n                data_samples.pred_instances.bboxes = bboxes.tensor\n        if 'ignored_instances' in data_samples:\n            bboxes = data_samples.ignored_instances.get('bboxes', None)\n            if isinstance(bboxes, BaseBoxes):\n                data_samples.ignored_instances.bboxes = bboxes.tensor\n\n\n_torch_version_div_indexing = (\n    'parrots' not in torch.__version__\n    and digit_version(torch.__version__) >= digit_version('1.8'))\n\n\ndef floordiv(dividend, divisor, rounding_mode='trunc'):\n    if _torch_version_div_indexing:\n        return torch.div(dividend, divisor, rounding_mode=rounding_mode)\n    else:\n        return dividend // divisor\n\n\ndef _filter_gt_instances_by_score(batch_data_samples: SampleList,\n                                  score_thr: float) -> SampleList:\n    \"\"\"Filter ground truth (GT) instances by score.\n\n    Args:\n        batch_data_samples (SampleList): The Data\n            Samples. It usually includes information such as\n            `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n        score_thr (float): The score filter threshold.\n\n    Returns:\n        SampleList: The Data Samples filtered by score.\n    \"\"\"\n    for data_samples in batch_data_samples:\n        assert 'scores' in data_samples.gt_instances, \\\n            'there does not exit scores in instances'\n        if data_samples.gt_instances.bboxes.shape[0] > 0:\n            data_samples.gt_instances = data_samples.gt_instances[\n                data_samples.gt_instances.scores > score_thr]\n    return batch_data_samples\n\n\ndef _filter_gt_instances_by_size(batch_data_samples: SampleList,\n                                 wh_thr: tuple) -> SampleList:\n    \"\"\"Filter ground truth (GT) instances by size.\n\n    Args:\n        batch_data_samples (SampleList): The Data\n            Samples. 
It usually includes information such as\n            `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n        wh_thr (tuple):  Minimum width and height of bbox.\n\n    Returns:\n        SampleList: The Data Samples filtered by score.\n    \"\"\"\n    for data_samples in batch_data_samples:\n        bboxes = data_samples.gt_instances.bboxes\n        if bboxes.shape[0] > 0:\n            w = bboxes[:, 2] - bboxes[:, 0]\n            h = bboxes[:, 3] - bboxes[:, 1]\n            data_samples.gt_instances = data_samples.gt_instances[\n                (w > wh_thr[0]) & (h > wh_thr[1])]\n    return batch_data_samples\n\n\ndef filter_gt_instances(batch_data_samples: SampleList,\n                        score_thr: float = None,\n                        wh_thr: tuple = None):\n    \"\"\"Filter ground truth (GT) instances by score and/or size.\n\n    Args:\n        batch_data_samples (SampleList): The Data\n            Samples. It usually includes information such as\n            `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n        score_thr (float): The score filter threshold.\n        wh_thr (tuple):  Minimum width and height of bbox.\n\n    Returns:\n        SampleList: The Data Samples filtered by score and/or size.\n    \"\"\"\n\n    if score_thr is not None:\n        batch_data_samples = _filter_gt_instances_by_score(\n            batch_data_samples, score_thr)\n    if wh_thr is not None:\n        batch_data_samples = _filter_gt_instances_by_size(\n            batch_data_samples, wh_thr)\n    return batch_data_samples\n\n\ndef rename_loss_dict(prefix: str, losses: dict) -> dict:\n    \"\"\"Rename the key names in loss dict by adding a prefix.\n\n    Args:\n        prefix (str): The prefix for loss components.\n        losses (dict):  A dictionary of loss components.\n\n    Returns:\n            dict: A dictionary of loss components with prefix.\n    \"\"\"\n    return {prefix + k: v for k, v in losses.items()}\n\n\ndef reweight_loss_dict(losses: dict, weight: float) -> dict:\n    \"\"\"Reweight losses in the dict by weight.\n\n    Args:\n        losses (dict):  A dictionary of loss components.\n        weight (float): Weight for loss components.\n\n    Returns:\n            dict: A dictionary of weighted loss components.\n    \"\"\"\n    for name, loss in losses.items():\n        if 'loss' in name:\n            if isinstance(loss, Sequence):\n                losses[name] = [item * weight for item in loss]\n            else:\n                losses[name] = loss * weight\n    return losses\n\n\ndef relative_coordinate_maps(\n    locations: Tensor,\n    centers: Tensor,\n    strides: Tensor,\n    size_of_interest: int,\n    feat_sizes: Tuple[int],\n) -> Tensor:\n    \"\"\"Generate the relative coordinate maps with feat_stride.\n\n    Args:\n        locations (Tensor): The prior location of mask feature map.\n            It has shape (num_priors, 2).\n        centers (Tensor): The prior points of a object in\n            all feature pyramid. It has shape (num_pos, 2)\n        strides (Tensor): The prior strides of a object in\n            all feature pyramid. 
It has shape (num_pos, 1)\n        size_of_interest (int): The size of the region used in rel coord.\n        feat_sizes (Tuple[int]): The feature size H and W, which has 2 dims.\n    Returns:\n        rel_coord_feat (Tensor): The coordinate feature\n            of shape (num_pos, 2, H, W).\n    \"\"\"\n\n    H, W = feat_sizes\n    rel_coordinates = centers.reshape(-1, 1, 2) - locations.reshape(1, -1, 2)\n    rel_coordinates = rel_coordinates.permute(0, 2, 1).float()\n    rel_coordinates = rel_coordinates / (\n        strides[:, None, None] * size_of_interest)\n    return rel_coordinates.reshape(-1, 2, H, W)\n\n\ndef aligned_bilinear(tensor: Tensor, factor: int) -> Tensor:\n    \"\"\"aligned bilinear, used in original implement in CondInst:\n\n    https://github.com/aim-uofa/AdelaiDet/blob/\\\n    c0b2092ce72442b0f40972f7c6dda8bb52c46d16/adet/utils/comm.py#L23\n    \"\"\"\n\n    assert tensor.dim() == 4\n    assert factor >= 1\n    assert int(factor) == factor\n\n    if factor == 1:\n        return tensor\n\n    h, w = tensor.size()[2:]\n    tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode='replicate')\n    oh = factor * h + 1\n    ow = factor * w + 1\n    tensor = F.interpolate(\n        tensor, size=(oh, ow), mode='bilinear', align_corners=True)\n    tensor = F.pad(\n        tensor, pad=(factor // 2, 0, factor // 2, 0), mode='replicate')\n\n    return tensor[:, :, :oh - 1, :ow - 1]\n\n\ndef unfold_wo_center(x, kernel_size: int, dilation: int) -> Tensor:\n    \"\"\"unfold_wo_center, used in original implement in BoxInst:\n\n    https://github.com/aim-uofa/AdelaiDet/blob/\\\n    4a3a1f7372c35b48ebf5f6adc59f135a0fa28d60/\\\n    adet/modeling/condinst/condinst.py#L53\n    \"\"\"\n    assert x.dim() == 4\n    assert kernel_size % 2 == 1\n\n    # using SAME padding\n    padding = (kernel_size + (dilation - 1) * (kernel_size - 1)) // 2\n    unfolded_x = F.unfold(\n        x, kernel_size=kernel_size, padding=padding, dilation=dilation)\n    unfolded_x = unfolded_x.reshape(\n        x.size(0), x.size(1), -1, x.size(2), x.size(3))\n    # remove the center pixels\n    size = kernel_size**2\n    unfolded_x = torch.cat(\n        (unfolded_x[:, :, :size // 2], unfolded_x[:, :, size // 2 + 1:]),\n        dim=2)\n\n    return unfolded_x\n"
  },
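  {
    "path": "examples/misc_utils_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# It exercises two helpers from misc.py: multi_apply, which maps a function\n# over per-level inputs and regroups the outputs, and filter_scores_and_topk,\n# which applies score thresholding plus top-k selection to per-box scores.\nimport torch\n\nfrom mmdet.models.utils import filter_scores_and_topk, multi_apply\n\n\ndef square_and_double(x):\n    return x * x, 2 * x\n\n\nsquares, doubles = multi_apply(square_and_double, [1, 2, 3])\nprint(squares, doubles)  # [1, 4, 9] [2, 4, 6]\n\n# (num_bboxes, num_classes) scores; keep entries above 0.5, at most 10.\nscores = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.05, 0.03]])\nkept_scores, labels, keep_idxs, _ = filter_scores_and_topk(\n    scores, score_thr=0.5, topk=10)\nprint(kept_scores, labels, keep_idxs)  # [0.9, 0.8], [0, 1], [0, 1]\n"
  },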
  {
    "path": "mmdet/models/utils/panoptic_gt_processing.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\n\ndef preprocess_panoptic_gt(gt_labels: Tensor, gt_masks: Tensor,\n                           gt_semantic_seg: Tensor, num_things: int,\n                           num_stuff: int) -> Tuple[Tensor, Tensor]:\n    \"\"\"Preprocess the ground truth for a image.\n\n    Args:\n        gt_labels (Tensor): Ground truth labels of each bbox,\n            with shape (num_gts, ).\n        gt_masks (BitmapMasks): Ground truth masks of each instances\n            of a image, shape (num_gts, h, w).\n        gt_semantic_seg (Tensor | None): Ground truth of semantic\n            segmentation with the shape (1, h, w).\n            [0, num_thing_class - 1] means things,\n            [num_thing_class, num_class-1] means stuff,\n            255 means VOID. It's None when training instance segmentation.\n\n    Returns:\n        tuple[Tensor, Tensor]: a tuple containing the following targets.\n\n            - labels (Tensor): Ground truth class indices for a\n                image, with shape (n, ), n is the sum of number\n                of stuff type and number of instance in a image.\n            - masks (Tensor): Ground truth mask for a image, with\n                shape (n, h, w). Contains stuff and things when training\n                panoptic segmentation, and things only when training\n                instance segmentation.\n    \"\"\"\n    num_classes = num_things + num_stuff\n    things_masks = gt_masks.to_tensor(\n        dtype=torch.bool, device=gt_labels.device)\n\n    if gt_semantic_seg is None:\n        masks = things_masks.long()\n        return gt_labels, masks\n\n    things_labels = gt_labels\n    gt_semantic_seg = gt_semantic_seg.squeeze(0)\n\n    semantic_labels = torch.unique(\n        gt_semantic_seg,\n        sorted=False,\n        return_inverse=False,\n        return_counts=False)\n    stuff_masks_list = []\n    stuff_labels_list = []\n    for label in semantic_labels:\n        if label < num_things or label >= num_classes:\n            continue\n        stuff_mask = gt_semantic_seg == label\n        stuff_masks_list.append(stuff_mask)\n        stuff_labels_list.append(label)\n\n    if len(stuff_masks_list) > 0:\n        stuff_masks = torch.stack(stuff_masks_list, dim=0)\n        stuff_labels = torch.stack(stuff_labels_list, dim=0)\n        labels = torch.cat([things_labels, stuff_labels], dim=0)\n        masks = torch.cat([things_masks, stuff_masks], dim=0)\n    else:\n        labels = things_labels\n        masks = things_masks\n\n    masks = masks.long()\n    return labels, masks\n"
  },
  {
    "path": "mmdet/models/utils/point_sample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import point_sample\nfrom torch import Tensor\n\n\ndef get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor:\n    \"\"\"Estimate uncertainty based on pred logits.\n\n    We estimate uncertainty as L1 distance between 0.0 and the logits\n    prediction in 'mask_preds' for the foreground class in `classes`.\n\n    Args:\n        mask_preds (Tensor): mask predication logits, shape (num_rois,\n            num_classes, mask_height, mask_width).\n\n        labels (Tensor): Either predicted or ground truth label for\n            each predicted mask, of length num_rois.\n\n    Returns:\n        scores (Tensor): Uncertainty scores with the most uncertain\n            locations having the highest uncertainty score,\n            shape (num_rois, 1, mask_height, mask_width)\n    \"\"\"\n    if mask_preds.shape[1] == 1:\n        gt_class_logits = mask_preds.clone()\n    else:\n        inds = torch.arange(mask_preds.shape[0], device=mask_preds.device)\n        gt_class_logits = mask_preds[inds, labels].unsqueeze(1)\n    return -torch.abs(gt_class_logits)\n\n\ndef get_uncertain_point_coords_with_randomness(\n        mask_preds: Tensor, labels: Tensor, num_points: int,\n        oversample_ratio: float, importance_sample_ratio: float) -> Tensor:\n    \"\"\"Get ``num_points`` most uncertain points with random points during\n    train.\n\n    Sample points in [0, 1] x [0, 1] coordinate space based on their\n    uncertainty. The uncertainties are calculated for each point using\n    'get_uncertainty()' function that takes point's logit prediction as\n    input.\n\n    Args:\n        mask_preds (Tensor): A tensor of shape (num_rois, num_classes,\n            mask_height, mask_width) for class-specific or class-agnostic\n            prediction.\n        labels (Tensor): The ground truth class for each instance.\n        num_points (int): The number of points to sample.\n        oversample_ratio (float): Oversampling parameter.\n        importance_sample_ratio (float): Ratio of points that are sampled\n            via importnace sampling.\n\n    Returns:\n        point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n            that contains the coordinates sampled points.\n    \"\"\"\n    assert oversample_ratio >= 1\n    assert 0 <= importance_sample_ratio <= 1\n    batch_size = mask_preds.shape[0]\n    num_sampled = int(num_points * oversample_ratio)\n    point_coords = torch.rand(\n        batch_size, num_sampled, 2, device=mask_preds.device)\n    point_logits = point_sample(mask_preds, point_coords)\n    # It is crucial to calculate uncertainty based on the sampled\n    # prediction value for the points. Calculating uncertainties of the\n    # coarse predictions first and sampling them for points leads to\n    # incorrect results.  To illustrate this: assume uncertainty func(\n    # logits)=-abs(logits), a sampled point between two coarse\n    # predictions with -1 and 1 logits has 0 logits, and therefore 0\n    # uncertainty value. 
However, if we calculate uncertainties for the\n    # coarse predictions first, both will have -1 uncertainty,\n    # and sampled point will get -1 uncertainty.\n    point_uncertainties = get_uncertainty(point_logits, labels)\n    num_uncertain_points = int(importance_sample_ratio * num_points)\n    num_random_points = num_points - num_uncertain_points\n    idx = torch.topk(\n        point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]\n    shift = num_sampled * torch.arange(\n        batch_size, dtype=torch.long, device=mask_preds.device)\n    idx += shift[:, None]\n    point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(\n        batch_size, num_uncertain_points, 2)\n    if num_random_points > 0:\n        rand_roi_coords = torch.rand(\n            batch_size, num_random_points, 2, device=mask_preds.device)\n        point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)\n    return point_coords\n"
  },
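  {
    "path": "examples/point_sample_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# get_uncertainty scores every mask location by how close the logit of the\n# assigned class is to the decision boundary (logit 0); values closer to\n# zero mean higher uncertainty.\nimport torch\n\nfrom mmdet.models.utils import get_uncertainty\n\nmask_preds = torch.randn(4, 3, 7, 7)  # (num_rois, num_classes, h, w)\nlabels = torch.tensor([0, 2, 1, 0])   # class picked for each RoI\nuncertainty = get_uncertainty(mask_preds, labels)\nprint(uncertainty.shape)             # torch.Size([4, 1, 7, 7])\nprint(bool(uncertainty.max() <= 0))  # True: scores are -|logit|\n"
  },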
  {
    "path": "mmdet/registry.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"MMDetection provides 17 registry nodes to support using modules across\nprojects. Each node is a child of the root registry in MMEngine.\n\nMore details can be found at\nhttps://mmengine.readthedocs.io/en/latest/tutorials/registry.html.\n\"\"\"\n\nfrom mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS\nfrom mmengine.registry import DATASETS as MMENGINE_DATASETS\nfrom mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR\nfrom mmengine.registry import HOOKS as MMENGINE_HOOKS\nfrom mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS\nfrom mmengine.registry import LOOPS as MMENGINE_LOOPS\nfrom mmengine.registry import METRICS as MMENGINE_METRICS\nfrom mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS\nfrom mmengine.registry import MODELS as MMENGINE_MODELS\nfrom mmengine.registry import \\\n    OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS\nfrom mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS\nfrom mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS\nfrom mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS\nfrom mmengine.registry import \\\n    RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS\nfrom mmengine.registry import RUNNERS as MMENGINE_RUNNERS\nfrom mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS\nfrom mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS\nfrom mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS\nfrom mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS\nfrom mmengine.registry import \\\n    WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS\nfrom mmengine.registry import Registry\n\n# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`\nRUNNERS = Registry(\n    'runner', parent=MMENGINE_RUNNERS, locations=['mmdet.engine.runner'])\n# manage runner constructors that define how to initialize runners\nRUNNER_CONSTRUCTORS = Registry(\n    'runner constructor',\n    parent=MMENGINE_RUNNER_CONSTRUCTORS,\n    locations=['mmdet.engine.runner'])\n# manage all kinds of loops like `EpochBasedTrainLoop`\nLOOPS = Registry(\n    'loop', parent=MMENGINE_LOOPS, locations=['mmdet.engine.runner'])\n# manage all kinds of hooks like `CheckpointHook`\nHOOKS = Registry(\n    'hook', parent=MMENGINE_HOOKS, locations=['mmdet.engine.hooks'])\n\n# manage data-related modules\nDATASETS = Registry(\n    'dataset', parent=MMENGINE_DATASETS, locations=['mmdet.datasets'])\nDATA_SAMPLERS = Registry(\n    'data sampler',\n    parent=MMENGINE_DATA_SAMPLERS,\n    locations=['mmdet.datasets.samplers'])\nTRANSFORMS = Registry(\n    'transform',\n    parent=MMENGINE_TRANSFORMS,\n    locations=['mmdet.datasets.transforms'])\n\n# manage all kinds of modules inheriting `nn.Module`\nMODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])\n# manage all kinds of model wrappers like 'MMDistributedDataParallel'\nMODEL_WRAPPERS = Registry(\n    'model_wrapper',\n    parent=MMENGINE_MODEL_WRAPPERS,\n    locations=['mmdet.models'])\n# manage all kinds of weight initialization modules like `Uniform`\nWEIGHT_INITIALIZERS = Registry(\n    'weight initializer',\n    parent=MMENGINE_WEIGHT_INITIALIZERS,\n    locations=['mmdet.models'])\n\n# manage all kinds of optimizers like `SGD` and `Adam`\nOPTIMIZERS = Registry(\n    'optimizer',\n    parent=MMENGINE_OPTIMIZERS,\n    locations=['mmdet.engine.optimizers'])\n# manage optimizer wrapper\nOPTIM_WRAPPERS = 
Registry(\n    'optim_wrapper',\n    parent=MMENGINE_OPTIM_WRAPPERS,\n    locations=['mmdet.engine.optimizers'])\n# manage constructors that customize the optimization hyperparameters.\nOPTIM_WRAPPER_CONSTRUCTORS = Registry(\n    'optimizer constructor',\n    parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS,\n    locations=['mmdet.engine.optimizers'])\n# manage all kinds of parameter schedulers like `MultiStepLR`\nPARAM_SCHEDULERS = Registry(\n    'parameter scheduler',\n    parent=MMENGINE_PARAM_SCHEDULERS,\n    locations=['mmdet.engine.schedulers'])\n# manage all kinds of metrics\nMETRICS = Registry(\n    'metric', parent=MMENGINE_METRICS, locations=['mmdet.evaluation'])\n# manage evaluator\nEVALUATOR = Registry(\n    'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmdet.evaluation'])\n\n# manage task-specific modules like anchor generators and box coders\nTASK_UTILS = Registry(\n    'task util', parent=MMENGINE_TASK_UTILS, locations=['mmdet.models'])\n\n# manage visualizer\nVISUALIZERS = Registry(\n    'visualizer',\n    parent=MMENGINE_VISUALIZERS,\n    locations=['mmdet.visualization'])\n# manage visualizer backend\nVISBACKENDS = Registry(\n    'vis_backend',\n    parent=MMENGINE_VISBACKENDS,\n    locations=['mmdet.visualization'])\n\n# manage logprocessor\nLOG_PROCESSORS = Registry(\n    'log_processor',\n    parent=MMENGINE_LOG_PROCESSORS,\n    # TODO: update the location when mmdet has its own log processor\n    locations=['mmdet.engine'])\n"
  },
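  {
    "path": "examples/registry_sketch.py",
    "content": "# Editor's illustrative sketch, not part of upstream MMDetection.\n# It shows the intended use of the registries declared in mmdet/registry.py:\n# decorate a class to register it, then build an instance from a config dict.\n# TinyConvNeck is a made-up name used only for this example.\nimport torch\nimport torch.nn as nn\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass TinyConvNeck(nn.Module):\n\n    def __init__(self, in_channels: int, out_channels: int):\n        super().__init__()\n        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return self.conv(x)\n\n\n# The config dict mirrors how model components are declared in mmdet configs.\nneck = MODELS.build(dict(type='TinyConvNeck', in_channels=8, out_channels=4))\nprint(neck(torch.rand(1, 8, 16, 16)).shape)  # torch.Size([1, 4, 16, 16])\n"
  },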
  {
    "path": "mmdet/structures/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .det_data_sample import DetDataSample, OptSampleList, SampleList\n\n__all__ = ['DetDataSample', 'SampleList', 'OptSampleList']\n"
  },
  {
    "path": "mmdet/structures/bbox/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_boxes import BaseBoxes\nfrom .bbox_overlaps import bbox_overlaps\nfrom .box_type import (autocast_box_type, convert_box_type, get_box_type,\n                       register_box, register_box_converter)\nfrom .horizontal_boxes import HorizontalBoxes\nfrom .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,\n                         bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,\n                         bbox_mapping_back, bbox_project, bbox_rescale,\n                         bbox_xyxy_to_cxcywh, cat_boxes, corner2bbox,\n                         distance2bbox, empty_box_as, find_inside_bboxes,\n                         get_box_tensor, get_box_wh, roi2bbox, scale_boxes,\n                         stack_boxes)\n\n__all__ = [\n    'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',\n    'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',\n    'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',\n    'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',\n    'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',\n    'register_box_converter', 'HorizontalBoxes', 'autocast_box_type',\n    'cat_boxes', 'stack_boxes', 'scale_boxes', 'get_box_wh', 'get_box_tensor',\n    'empty_box_as'\n]\n"
  },
  {
    "path": "mmdet/structures/bbox/base_boxes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod, abstractproperty, abstractstaticmethod\nfrom typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union\n\nimport numpy as np\nimport torch\nfrom torch import BoolTensor, Tensor\n\nfrom mmdet.structures.mask.structures import BitmapMasks, PolygonMasks\n\nT = TypeVar('T')\nDeviceType = Union[str, torch.device]\nIndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor,\n                  torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray]\nMaskType = Union[BitmapMasks, PolygonMasks]\n\n\nclass BaseBoxes(metaclass=ABCMeta):\n    \"\"\"The base class for 2D box types.\n\n    The functions of ``BaseBoxes`` lie in three fields:\n\n    - Verify the boxes shape.\n    - Support tensor-like operations.\n    - Define abstract functions for 2D boxes.\n\n    In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape\n    w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length\n    of the last dimension being ``box_dim`` will be regarded as valid.\n    ``BaseBoxes`` will restore them at the field ``tensor``. It's necessary\n    to override ``box_dim`` in subclass to guarantee the data shape is\n    correct.\n\n    There are many basic tensor-like functions implemented in ``BaseBoxes``.\n    In most cases, users can operate ``BaseBoxes`` instance like a normal\n    tensor. To protect the validity of data shape, All tensor-like functions\n    cannot modify the last dimension of ``self.tensor``.\n\n    When creating a new box type, users need to inherit from ``BaseBoxes``\n    and override abstract methods and specify the ``box_dim``. Then, register\n    the new box type by using the decorator ``register_box_type``.\n\n    Args:\n        data (Tensor or np.ndarray or Sequence): The box data with shape\n            (..., box_dim).\n        dtype (torch.dtype, Optional): data type of boxes. Defaults to None.\n        device (str or torch.device, Optional): device of boxes.\n            Default to None.\n        clone (bool): Whether clone ``boxes`` or not. 
Defaults to True.\n    \"\"\"\n\n    # Used to verify the last dimension length\n    # Should override it in subclass.\n    box_dim: int = 0\n\n    def __init__(self,\n                 data: Union[Tensor, np.ndarray, Sequence],\n                 dtype: Optional[torch.dtype] = None,\n                 device: Optional[DeviceType] = None,\n                 clone: bool = True) -> None:\n        if isinstance(data, (np.ndarray, Tensor, Sequence)):\n            data = torch.as_tensor(data)\n        else:\n            raise TypeError('boxes should be Tensor, ndarray, or Sequence, ',\n                            f'but got {type(data)}')\n\n        if device is not None or dtype is not None:\n            data = data.to(dtype=dtype, device=device)\n        # Clone the data to avoid potential bugs\n        if clone:\n            data = data.clone()\n        # handle the empty input like []\n        if data.numel() == 0:\n            data = data.reshape((-1, self.box_dim))\n\n        assert data.dim() >= 2 and data.size(-1) == self.box_dim, \\\n            ('The boxes dimension must >= 2 and the length of the last '\n             f'dimension must be {self.box_dim}, but got boxes with '\n             f'shape {data.shape}.')\n        self.tensor = data\n\n    def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes':\n        \"\"\"Convert self to another box type.\n\n        Args:\n            dst_type (str or type): destination box type.\n\n        Returns:\n            :obj:`BaseBoxes`: destination box type object .\n        \"\"\"\n        from .box_type import convert_box_type\n        return convert_box_type(self, dst_type=dst_type)\n\n    def empty_boxes(self: T,\n                    dtype: Optional[torch.dtype] = None,\n                    device: Optional[DeviceType] = None) -> T:\n        \"\"\"Create empty box.\n\n        Args:\n            dtype (torch.dtype, Optional): data type of boxes.\n            device (str or torch.device, Optional): device of boxes.\n\n        Returns:\n            T: empty boxes with shape of (0, box_dim).\n        \"\"\"\n        empty_box = self.tensor.new_zeros(\n            0, self.box_dim, dtype=dtype, device=device)\n        return type(self)(empty_box, clone=False)\n\n    def fake_boxes(self: T,\n                   sizes: Tuple[int],\n                   fill: float = 0,\n                   dtype: Optional[torch.dtype] = None,\n                   device: Optional[DeviceType] = None) -> T:\n        \"\"\"Create fake boxes with specific sizes and fill values.\n\n        Args:\n            sizes (Tuple[int]): The size of fake boxes. The last value must\n                be equal with ``self.box_dim``.\n            fill (float): filling value. 
Defaults to 0.\n            dtype (torch.dtype, Optional): data type of boxes.\n            device (str or torch.device, Optional): device of boxes.\n\n        Returns:\n            T: Fake boxes with shape of ``sizes``.\n        \"\"\"\n        fake_boxes = self.tensor.new_full(\n            sizes, fill, dtype=dtype, device=device)\n        return type(self)(fake_boxes, clone=False)\n\n    def __getitem__(self: T, index: IndexType) -> T:\n        \"\"\"Rewrite getitem to protect the last dimension shape.\"\"\"\n        boxes = self.tensor\n        if isinstance(index, np.ndarray):\n            index = torch.as_tensor(index, device=self.device)\n        if isinstance(index, Tensor) and index.dtype == torch.bool:\n            assert index.dim() < boxes.dim()\n        elif isinstance(index, tuple):\n            assert len(index) < boxes.dim()\n            # `Ellipsis`(...) is commonly used in index like [None, ...].\n            # When `Ellipsis` is in index, it must be the last item.\n            if Ellipsis in index:\n                assert index[-1] is Ellipsis\n\n        boxes = boxes[index]\n        if boxes.dim() == 1:\n            boxes = boxes.reshape(1, -1)\n        return type(self)(boxes, clone=False)\n\n    def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T:\n        \"\"\"Rewrite setitem to protect the last dimension shape.\"\"\"\n        assert type(values) is type(self), \\\n            'The value to be set must be the same box type as self'\n        values = values.tensor\n\n        if isinstance(index, np.ndarray):\n            index = torch.as_tensor(index, device=self.device)\n        if isinstance(index, Tensor) and index.dtype == torch.bool:\n            assert index.dim() < self.tensor.dim()\n        elif isinstance(index, tuple):\n            assert len(index) < self.tensor.dim()\n            # `Ellipsis`(...) 
is commonly used in index like [None, ...].\n            # When `Ellipsis` is in index, it must be the last item.\n            if Ellipsis in index:\n                assert index[-1] is Ellipsis\n\n        self.tensor[index] = values\n\n    def __len__(self) -> int:\n        \"\"\"Return the length of the first dimension of self.tensor.\"\"\"\n        return self.tensor.size(0)\n\n    def __deepcopy__(self, memo):\n        \"\"\"Only clone the ``self.tensor`` when applying deepcopy.\"\"\"\n        cls = self.__class__\n        other = cls.__new__(cls)\n        memo[id(self)] = other\n        other.tensor = self.tensor.clone()\n        return other\n\n    def __repr__(self) -> str:\n        \"\"\"Return a string that describes the object.\"\"\"\n        return self.__class__.__name__ + '(\\n' + str(self.tensor) + ')'\n\n    def new_tensor(self, *args, **kwargs) -> Tensor:\n        \"\"\"Reload ``new_tensor`` from self.tensor.\"\"\"\n        return self.tensor.new_tensor(*args, **kwargs)\n\n    def new_full(self, *args, **kwargs) -> Tensor:\n        \"\"\"Reload ``new_full`` from self.tensor.\"\"\"\n        return self.tensor.new_full(*args, **kwargs)\n\n    def new_empty(self, *args, **kwargs) -> Tensor:\n        \"\"\"Reload ``new_empty`` from self.tensor.\"\"\"\n        return self.tensor.new_empty(*args, **kwargs)\n\n    def new_ones(self, *args, **kwargs) -> Tensor:\n        \"\"\"Reload ``new_ones`` from self.tensor.\"\"\"\n        return self.tensor.new_ones(*args, **kwargs)\n\n    def new_zeros(self, *args, **kwargs) -> Tensor:\n        \"\"\"Reload ``new_zeros`` from self.tensor.\"\"\"\n        return self.tensor.new_zeros(*args, **kwargs)\n\n    def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]:\n        \"\"\"Reload ``size`` from self.tensor.\"\"\"\n        # self.tensor.size(dim) cannot work when dim=None.\n        return self.tensor.size() if dim is None else self.tensor.size(dim)\n\n    def dim(self) -> int:\n        \"\"\"Reload ``dim`` from self.tensor.\"\"\"\n        return self.tensor.dim()\n\n    @property\n    def device(self) -> torch.device:\n        \"\"\"Reload ``device`` from self.tensor.\"\"\"\n        return self.tensor.device\n\n    @property\n    def dtype(self) -> torch.dtype:\n        \"\"\"Reload ``dtype`` from self.tensor.\"\"\"\n        return self.tensor.dtype\n\n    @property\n    def shape(self) -> torch.Size:\n        \"\"\"Reload ``shape`` from self.tensor.\"\"\"\n        return self.tensor.shape\n\n    def numel(self) -> int:\n        \"\"\"Reload ``numel`` from self.tensor.\"\"\"\n        return self.tensor.numel()\n\n    def numpy(self) -> np.ndarray:\n        \"\"\"Reload ``numpy`` from self.tensor.\"\"\"\n        return self.tensor.numpy()\n\n    def to(self: T, *args, **kwargs) -> T:\n        \"\"\"Reload ``to`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.to(*args, **kwargs), clone=False)\n\n    def cpu(self: T) -> T:\n        \"\"\"Reload ``cpu`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.cpu(), clone=False)\n\n    def cuda(self: T, *args, **kwargs) -> T:\n        \"\"\"Reload ``cuda`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.cuda(*args, **kwargs), clone=False)\n\n    def clone(self: T) -> T:\n        \"\"\"Reload ``clone`` from self.tensor.\"\"\"\n        return type(self)(self.tensor)\n\n    def detach(self: T) -> T:\n        \"\"\"Reload ``detach`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.detach(), clone=False)\n\n    def view(self: T, *shape: Tuple[int]) -> T:\n        \"\"\"Reload ``view`` from 
self.tensor.\"\"\"\n        return type(self)(self.tensor.view(shape), clone=False)\n\n    def reshape(self: T, *shape: Tuple[int]) -> T:\n        \"\"\"Reload ``reshape`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.reshape(shape), clone=False)\n\n    def expand(self: T, *sizes: Tuple[int]) -> T:\n        \"\"\"Reload ``expand`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.expand(sizes), clone=False)\n\n    def repeat(self: T, *sizes: Tuple[int]) -> T:\n        \"\"\"Reload ``repeat`` from self.tensor.\"\"\"\n        return type(self)(self.tensor.repeat(sizes), clone=False)\n\n    def transpose(self: T, dim0: int, dim1: int) -> T:\n        \"\"\"Reload ``transpose`` from self.tensor.\"\"\"\n        ndim = self.tensor.dim()\n        assert dim0 != -1 and dim0 != ndim - 1\n        assert dim1 != -1 and dim1 != ndim - 1\n        return type(self)(self.tensor.transpose(dim0, dim1), clone=False)\n\n    def permute(self: T, *dims: Tuple[int]) -> T:\n        \"\"\"Reload ``permute`` from self.tensor.\"\"\"\n        assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1\n        return type(self)(self.tensor.permute(dims), clone=False)\n\n    def split(self: T,\n              split_size_or_sections: Union[int, Sequence[int]],\n              dim: int = 0) -> List[T]:\n        \"\"\"Reload ``split`` from self.tensor.\"\"\"\n        assert dim != -1 and dim != self.tensor.dim() - 1\n        boxes_list = self.tensor.split(split_size_or_sections, dim=dim)\n        return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n    def chunk(self: T, chunks: int, dim: int = 0) -> List[T]:\n        \"\"\"Reload ``chunk`` from self.tensor.\"\"\"\n        assert dim != -1 and dim != self.tensor.dim() - 1\n        boxes_list = self.tensor.chunk(chunks, dim=dim)\n        return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n    def unbind(self: T, dim: int = 0) -> List[T]:\n        \"\"\"Reload ``unbind`` from self.tensor.\"\"\"\n        assert dim != -1 and dim != self.tensor.dim() - 1\n        boxes_list = self.tensor.unbind(dim=dim)\n        return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n    def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T:\n        \"\"\"Reload ``flatten`` from self.tensor.\"\"\"\n        assert end_dim != -1 and end_dim != self.tensor.dim() - 1\n        return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False)\n\n    def squeeze(self: T, dim: Optional[int] = None) -> T:\n        \"\"\"Reload ``squeeze`` from self.tensor.\"\"\"\n        boxes = self.tensor.squeeze() if dim is None else \\\n            self.tensor.squeeze(dim)\n        return type(self)(boxes, clone=False)\n\n    def unsqueeze(self: T, dim: int) -> T:\n        \"\"\"Reload ``unsqueeze`` from self.tensor.\"\"\"\n        assert dim != -1 and dim != self.tensor.dim()\n        return type(self)(self.tensor.unsqueeze(dim), clone=False)\n\n    @classmethod\n    def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n        \"\"\"Concatenates a box instance list into one single box instance.\n        Similar to ``torch.cat``.\n\n        Args:\n            box_list (Sequence[T]): A sequence of box instances.\n            dim (int): The dimension over which the boxes are concatenated.\n                Defaults to 0.\n\n        Returns:\n            T: Concatenated box instance.\n        \"\"\"\n        assert isinstance(box_list, Sequence)\n        if len(box_list) == 0:\n            raise ValueError('box_list should not 
be an empty list.')\n\n        assert dim != -1 and dim != box_list[0].dim() - 1\n        assert all(isinstance(boxes, cls) for boxes in box_list)\n\n        th_box_list = [boxes.tensor for boxes in box_list]\n        return cls(torch.cat(th_box_list, dim=dim), clone=False)\n\n    @classmethod\n    def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n        \"\"\"Concatenates a sequence of tensors along a new dimension. Similar to\n        ``torch.stack``.\n\n        Args:\n            box_list (Sequence[T]): A sequence of box instances.\n            dim (int): Dimension to insert. Defaults to 0.\n\n        Returns:\n            T: Concatenated box instance.\n        \"\"\"\n        assert isinstance(box_list, Sequence)\n        if len(box_list) == 0:\n            raise ValueError('box_list should not be an empty list.')\n\n        assert dim != -1 and dim != box_list[0].dim()\n        assert all(isinstance(boxes, cls) for boxes in box_list)\n\n        th_box_list = [boxes.tensor for boxes in box_list]\n        return cls(torch.stack(th_box_list, dim=dim), clone=False)\n\n    @abstractproperty\n    def centers(self) -> Tensor:\n        \"\"\"Return a tensor representing the centers of boxes.\"\"\"\n        pass\n\n    @abstractproperty\n    def areas(self) -> Tensor:\n        \"\"\"Return a tensor representing the areas of boxes.\"\"\"\n        pass\n\n    @abstractproperty\n    def widths(self) -> Tensor:\n        \"\"\"Return a tensor representing the widths of boxes.\"\"\"\n        pass\n\n    @abstractproperty\n    def heights(self) -> Tensor:\n        \"\"\"Return a tensor representing the heights of boxes.\"\"\"\n        pass\n\n    @abstractmethod\n    def flip_(self,\n              img_shape: Tuple[int, int],\n              direction: str = 'horizontal') -> None:\n        \"\"\"Flip boxes horizontally or vertically in-place.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n            direction (str): Flip direction, options are \"horizontal\",\n                \"vertical\" and \"diagonal\". Defaults to \"horizontal\".\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def translate_(self, distances: Tuple[float, float]) -> None:\n        \"\"\"Translate boxes in-place.\n\n        Args:\n            distances (Tuple[float, float]): translate distances. The first\n                is horizontal distance and the second is vertical distance.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def clip_(self, img_shape: Tuple[int, int]) -> None:\n        \"\"\"Clip boxes according to the image shape in-place.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def rotate_(self, center: Tuple[float, float], angle: float) -> None:\n        \"\"\"Rotate all boxes in-place.\n\n        Args:\n            center (Tuple[float, float]): Rotation origin.\n            angle (float): Rotation angle represented in degrees. 
Positive\n                values mean clockwise rotation.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:\n        \"\"\"Geometrically transform boxes in-place.\n\n        Args:\n            homography_matrix (Tensor or np.ndarray):\n                Shape (3, 3) for geometric transformation.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def rescale_(self, scale_factor: Tuple[float, float]) -> None:\n        \"\"\"Rescale boxes w.r.t. scale_factor in-place.\n\n        Note:\n            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n            w.r.t. ``scale_factor``. The difference is that ``resize_`` only\n            changes the width and the height of boxes, but ``rescale_`` also\n            rescales the box centers simultaneously.\n\n        Args:\n            scale_factor (Tuple[float, float]): factors for scaling boxes.\n                The length should be 2.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def resize_(self, scale_factor: Tuple[float, float]) -> None:\n        \"\"\"Resize the box width and height w.r.t. scale_factor in-place.\n\n        Note:\n            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n            w.r.t. ``scale_factor``. The difference is that ``resize_`` only\n            changes the width and the height of boxes, but ``rescale_`` also\n            rescales the box centers simultaneously.\n\n        Args:\n            scale_factor (Tuple[float, float]): factors for scaling box\n                shapes. The length should be 2.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def is_inside(self,\n                  img_shape: Tuple[int, int],\n                  all_inside: bool = False,\n                  allowed_border: int = 0) -> BoolTensor:\n        \"\"\"Find boxes inside the image.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n            all_inside (bool): Whether the boxes are all inside the image or\n                part inside the image. Defaults to False.\n            allowed_border (int): Boxes that extend beyond the image shape\n                boundary by more than ``allowed_border`` are considered\n                \"outside\". Defaults to 0.\n        Returns:\n            BoolTensor: A BoolTensor indicating whether the box is inside\n            the image. Assuming the original boxes have shape (m, n, box_dim),\n            the output has shape (m, n).\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def find_inside_points(self,\n                           points: Tensor,\n                           is_aligned: bool = False) -> BoolTensor:\n        \"\"\"Find inside box points. Boxes dimension must be 2.\n\n        Args:\n            points (Tensor): Points coordinates. Has shape of (m, 2).\n            is_aligned (bool): Whether ``points`` has been aligned with boxes\n                or not. If True, the length of boxes and ``points`` should be\n                the same. Defaults to False.\n\n        Returns:\n            BoolTensor: A BoolTensor indicating whether a point is inside\n            boxes. Assuming the boxes have shape of (n, box_dim), if\n            ``is_aligned`` is False, the index has shape of (m, n). 
If\n            ``is_aligned`` is True, m should be equal to n and the index has\n            shape of (m, ).\n        \"\"\"\n        pass\n\n    @abstractstaticmethod\n    def overlaps(boxes1: 'BaseBoxes',\n                 boxes2: 'BaseBoxes',\n                 mode: str = 'iou',\n                 is_aligned: bool = False,\n                 eps: float = 1e-6) -> Tensor:\n        \"\"\"Calculate overlap between two set of boxes with their types\n        converted to the present box type.\n\n        Args:\n            boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)\n                or empty.\n            boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)\n                or empty.\n            mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n                over foreground). Defaults to \"iou\".\n            is_aligned (bool): If True, then m and n must be equal. Defaults\n                to False.\n            eps (float): A value added to the denominator for numerical\n                stability. Defaults to 1e-6.\n\n        Returns:\n            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n        \"\"\"\n        pass\n\n    @abstractstaticmethod\n    def from_instance_masks(masks: MaskType) -> 'BaseBoxes':\n        \"\"\"Create boxes from instance masks.\n\n        Args:\n            masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or\n                PolygonMasks instance with length of n.\n\n        Returns:\n            :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim).\n        \"\"\"\n        pass\n"
  },
  {
    "path": "mmdet/structures/bbox/bbox_overlaps.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n\n\ndef bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n    \"\"\"Calculate overlap between two set of bboxes.\n\n    FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n    Note:\n        Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n        there are some new generated variable when calculating IOU\n        using bbox_overlaps function:\n\n        1) is_aligned is False\n            area1: M x 1\n            area2: N x 1\n            lt: M x N x 2\n            rb: M x N x 2\n            wh: M x N x 2\n            overlap: M x N x 1\n            union: M x N x 1\n            ious: M x N x 1\n\n            Total memory:\n                S = (9 x N x M + N + M) * 4 Byte,\n\n            When using FP16, we can reduce:\n                R = (9 x N x M + N + M) * 4 / 2 Byte\n                R large than (N + M) * 4 * 2 is always true when N and M >= 1.\n                Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n                           N + 1 < 3 * N, when N or M is 1.\n\n            Given M = 40 (ground truth), N = 400000 (three anchor boxes\n            in per grid, FPN, R-CNNs),\n                R = 275 MB (one times)\n\n            A special case (dense detection), M = 512 (ground truth),\n                R = 3516 MB = 3.43 GB\n\n            When the batch size is B, reduce:\n                B x R\n\n            Therefore, CUDA memory runs out frequently.\n\n            Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n            |   dtype   |   M   |   N   |   Use    |   Real   |   Ideal   |\n            |:----:|:----:|:----:|:----:|:----:|:----:|\n            |   FP32   |   512 | 400000 | 8020 MiB |   --   |   --   |\n            |   FP16   |   512 | 400000 |   4504 MiB | 3516 MiB | 3516 MiB |\n            |   FP32   |   40 | 400000 |   1540 MiB |   --   |   --   |\n            |   FP16   |   40 | 400000 |   1264 MiB |   276MiB   | 275 MiB |\n\n        2) is_aligned is True\n            area1: N x 1\n            area2: N x 1\n            lt: N x 2\n            rb: N x 2\n            wh: N x 2\n            overlap: N x 1\n            union: N x 1\n            ious: N x 1\n\n            Total memory:\n                S = 11 x N * 4 Byte\n\n            When using FP16, we can reduce:\n                R = 11 x N * 4 / 2 Byte\n\n        So do the 'giou' (large than 'iou').\n\n        Time-wise, FP16 is generally faster than FP32.\n\n        When gpu_assign_thr is not -1, it takes more time on cpu\n        but not reduce memory.\n        There, we can reduce half the memory and keep the speed.\n\n    If ``is_aligned`` is ``False``, then calculate the overlaps between each\n    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n    pair of bboxes1 and bboxes2.\n\n    Args:\n        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n            B indicates the batch dim, in shape (B1, B2, ..., Bn).\n            If ``is_aligned`` is ``True``, then m and n must be equal.\n        mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n            
foreground) or \"giou\" (generalized intersection over union).\n            Default \"iou\".\n        is_aligned (bool, optional): If True, then m and n must be equal.\n            Default False.\n        eps (float, optional): A value added to the denominator for numerical\n            stability. Default 1e-6.\n\n    Returns:\n        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n    Example:\n        >>> bboxes1 = torch.FloatTensor([\n        >>>     [0, 0, 10, 10],\n        >>>     [10, 10, 20, 20],\n        >>>     [32, 32, 38, 42],\n        >>> ])\n        >>> bboxes2 = torch.FloatTensor([\n        >>>     [0, 0, 10, 20],\n        >>>     [0, 10, 10, 19],\n        >>>     [10, 10, 20, 20],\n        >>> ])\n        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n        >>> assert overlaps.shape == (3, 3)\n        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n        >>> assert overlaps.shape == (3, )\n\n    Example:\n        >>> empty = torch.empty(0, 4)\n        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n    \"\"\"\n\n    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n    # Either the boxes are empty or the length of boxes' last dimension is 4\n    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n    # Batch dim must be the same\n    # Batch dim: (B1, B2, ... Bn)\n    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n    batch_shape = bboxes1.shape[:-2]\n\n    rows = bboxes1.size(-2)\n    cols = bboxes2.size(-2)\n    if is_aligned:\n        assert rows == cols\n\n    if rows * cols == 0:\n        if is_aligned:\n            return bboxes1.new(batch_shape + (rows, ))\n        else:\n            return bboxes1.new(batch_shape + (rows, cols))\n\n    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n        bboxes1[..., 3] - bboxes1[..., 1])\n    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n        bboxes2[..., 3] - bboxes2[..., 1])\n\n    if is_aligned:\n        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]\n        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]\n\n        wh = fp16_clamp(rb - lt, min=0)\n        overlap = wh[..., 0] * wh[..., 1]\n\n        if mode in ['iou', 'giou']:\n            union = area1 + area2 - overlap\n        else:\n            union = area1\n        if mode == 'giou':\n            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n    else:\n        lt = torch.max(bboxes1[..., :, None, :2],\n                       bboxes2[..., None, :, :2])  # [B, rows, cols, 2]\n        rb = torch.min(bboxes1[..., :, None, 2:],\n                       bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]\n\n        wh = fp16_clamp(rb - lt, min=0)\n        overlap = wh[..., 0] * wh[..., 1]\n\n        if mode in ['iou', 'giou']:\n            union = area1[..., None] + area2[..., None, :] - overlap\n        else:\n            union = area1[..., None]\n        if mode == 'giou':\n            enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n                                    bboxes2[..., None, :, :2])\n            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n                                    
bboxes2[..., None, :, 2:])\n\n    eps = union.new_tensor([eps])\n    union = torch.max(union, eps)\n    ious = overlap / union\n    if mode in ['iou', 'iof']:\n        return ious\n    # calculate gious\n    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n    enclose_area = torch.max(enclose_area, eps)\n    gious = ious - (enclose_area - union) / enclose_area\n    return gious\n"
  },
  {
    "path": "mmdet/structures/bbox/box_type.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Callable, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom .base_boxes import BaseBoxes\n\nBoxType = Union[np.ndarray, Tensor, BaseBoxes]\n\nbox_types: dict = {}\n_box_type_to_name: dict = {}\nbox_converters: dict = {}\n\n\ndef _register_box(name: str, box_type: Type, force: bool = False) -> None:\n    \"\"\"Register a box type.\n\n    Args:\n        name (str): The name of box type.\n        box_type (type): Box mode class to be registered.\n        force (bool): Whether to override an existing class with the same\n            name. Defaults to False.\n    \"\"\"\n    assert issubclass(box_type, BaseBoxes)\n    name = name.lower()\n\n    if not force and (name in box_types or box_type in _box_type_to_name):\n        raise KeyError(f'box type {name} has been registered')\n    elif name in box_types:\n        _box_type = box_types.pop(name)\n        _box_type_to_name.pop(_box_type)\n    elif box_type in _box_type_to_name:\n        _name = _box_type_to_name.pop(box_type)\n        box_types.pop(_name)\n\n    box_types[name] = box_type\n    _box_type_to_name[box_type] = name\n\n\ndef register_box(name: str,\n                 box_type: Type = None,\n                 force: bool = False) -> Union[Type, Callable]:\n    \"\"\"Register a box type.\n\n    A record will be added to ``bbox_types``, whose key is the box type name\n    and value is the box type itself. Simultaneously, a reverse dictionary\n    ``_box_type_to_name`` will be updated. It can be used as a decorator or\n    a normal function.\n\n    Args:\n        name (str): The name of box type.\n        bbox_type (type, Optional): Box type class to be registered.\n            Defaults to None.\n        force (bool): Whether to override the existing box type with the same\n            name. Defaults to False.\n\n    Examples:\n        >>> from mmdet.structures.bbox import register_box\n        >>> from mmdet.structures.bbox import BaseBoxes\n\n        >>> # as a decorator\n        >>> @register_box('hbox')\n        >>> class HorizontalBoxes(BaseBoxes):\n        >>>     pass\n\n        >>> # as a normal function\n        >>> class RotatedBoxes(BaseBoxes):\n        >>>     pass\n        >>> register_box('rbox', RotatedBoxes)\n    \"\"\"\n    if not isinstance(force, bool):\n        raise TypeError(f'force must be a boolean, but got {type(force)}')\n\n    # use it as a normal method: register_box(name, box_type=BoxCls)\n    if box_type is not None:\n        _register_box(name=name, box_type=box_type, force=force)\n        return box_type\n\n    # use it as a decorator: @register_box(name)\n    def _register(cls):\n        _register_box(name=name, box_type=cls, force=force)\n        return cls\n\n    return _register\n\n\ndef _register_box_converter(src_type: Union[str, type],\n                            dst_type: Union[str, type],\n                            converter: Callable,\n                            force: bool = False) -> None:\n    \"\"\"Register a box converter.\n\n    Args:\n        src_type (str or type): source box type name or class.\n        dst_type (str or type): destination box type name or class.\n        converter (Callable): Convert function.\n        force (bool): Whether to override the existing box type with the same\n            name. 
Defaults to False.\n    \"\"\"\n    assert callable(converter)\n    src_type_name, _ = get_box_type(src_type)\n    dst_type_name, _ = get_box_type(dst_type)\n\n    converter_name = src_type_name + '2' + dst_type_name\n    if not force and converter_name in box_converters:\n        raise KeyError(f'The box converter from {src_type_name} to '\n                       f'{dst_type_name} has been registered.')\n\n    box_converters[converter_name] = converter\n\n\ndef register_box_converter(src_type: Union[str, type],\n                           dst_type: Union[str, type],\n                           converter: Optional[Callable] = None,\n                           force: bool = False) -> Callable:\n    \"\"\"Register a box converter.\n\n    A record will be added to ``box_converter``, whose key is\n    '{src_type_name}2{dst_type_name}' and value is the convert function.\n    It can be used as a decorator or a normal function.\n\n    Args:\n        src_type (str or type): source box type name or class.\n        dst_type (str or type): destination box type name or class.\n        converter (Callable): Convert function. Defaults to None.\n        force (bool): Whether to override the existing box type with the same\n            name. Defaults to False.\n\n    Examples:\n        >>> from mmdet.structures.bbox import register_box_converter\n        >>> # as a decorator\n        >>> @register_box_converter('hbox', 'rbox')\n        >>> def converter_A(boxes):\n        >>>     pass\n\n        >>> # as a normal function\n        >>> def converter_B(boxes):\n        >>>     pass\n        >>> register_box_converter('rbox', 'hbox', converter_B)\n    \"\"\"\n    if not isinstance(force, bool):\n        raise TypeError(f'force must be a boolean, but got {type(force)}')\n\n    # use it as a normal method:\n    # register_box_converter(src_type, dst_type, converter=Func)\n    if converter is not None:\n        _register_box_converter(\n            src_type=src_type,\n            dst_type=dst_type,\n            converter=converter,\n            force=force)\n        return converter\n\n    # use it as a decorator: @register_box_converter(name)\n    def _register(func):\n        _register_box_converter(\n            src_type=src_type, dst_type=dst_type, converter=func, force=force)\n        return func\n\n    return _register\n\n\ndef get_box_type(box_type: Union[str, type]) -> Tuple[str, type]:\n    \"\"\"get both box type name and class.\n\n    Args:\n        box_type (str or type): Single box type name or class.\n\n    Returns:\n        Tuple[str, type]: A tuple of box type name and class.\n    \"\"\"\n    if isinstance(box_type, str):\n        type_name = box_type.lower()\n        assert type_name in box_types, \\\n            f\"Box type {type_name} hasn't been registered in box_types.\"\n        type_cls = box_types[type_name]\n    elif issubclass(box_type, BaseBoxes):\n        assert box_type in _box_type_to_name, \\\n            f\"Box type {box_type} hasn't been registered in box_types.\"\n        type_name = _box_type_to_name[box_type]\n        type_cls = box_type\n    else:\n        raise KeyError('box_type must be a str or class inheriting from '\n                       f'BaseBoxes, but got {type(box_type)}.')\n    return type_name, type_cls\n\n\ndef convert_box_type(boxes: BoxType,\n                     *,\n                     src_type: Union[str, type] = None,\n                     dst_type: Union[str, type] = None) -> BoxType:\n    \"\"\"Convert boxes from source type to destination type.\n\n    
If ``boxes`` is an instance of BaseBoxes, the ``src_type`` will be set\n    as the type of ``boxes``.\n\n    Args:\n        boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes to be\n            converted.\n        src_type (str or type, Optional): source box type. Defaults to None.\n        dst_type (str or type, Optional): destination box type. Defaults to\n            None.\n\n    Returns:\n        Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. Its type\n        is consistent with the input's type.\n    \"\"\"\n    assert dst_type is not None\n    dst_type_name, dst_type_cls = get_box_type(dst_type)\n\n    is_box_cls = False\n    is_numpy = False\n    if isinstance(boxes, BaseBoxes):\n        src_type_name, _ = get_box_type(type(boxes))\n        is_box_cls = True\n    elif isinstance(boxes, (Tensor, np.ndarray)):\n        assert src_type is not None\n        src_type_name, _ = get_box_type(src_type)\n        if isinstance(boxes, np.ndarray):\n            is_numpy = True\n    else:\n        raise TypeError('boxes must be an instance of BaseBoxes, Tensor or '\n                        f'ndarray, but got {type(boxes)}.')\n\n    if src_type_name == dst_type_name:\n        return boxes\n\n    converter_name = src_type_name + '2' + dst_type_name\n    assert converter_name in box_converters, \\\n        \"Convert function hasn't been registered in box_converters.\"\n    converter = box_converters[converter_name]\n\n    if is_box_cls:\n        boxes = converter(boxes.tensor)\n        return dst_type_cls(boxes)\n    elif is_numpy:\n        boxes = converter(torch.from_numpy(boxes))\n        return boxes.numpy()\n    else:\n        return converter(boxes)\n\n\ndef autocast_box_type(dst_box_type='hbox') -> Callable:\n    \"\"\"A decorator which automatically casts results['gt_bboxes'] to the\n    destination box type.\n\n    It is commonly used in mmdet.datasets.transforms to make the transforms\n    compatible with the np.ndarray type of results['gt_bboxes'].\n\n    The processing speeds of np.ndarray and BaseBoxes data are nearly the\n    same:\n\n    - np.ndarray: 0.0509 img/s\n    - BaseBoxes: 0.0551 img/s\n\n    Args:\n        dst_box_type (str): Destination box type.\n    \"\"\"\n    _, box_type_cls = get_box_type(dst_box_type)\n\n    def decorator(func: Callable) -> Callable:\n\n        def wrapper(self, results: dict, *args, **kwargs) -> dict:\n            if ('gt_bboxes' not in results\n                    or isinstance(results['gt_bboxes'], BaseBoxes)):\n                return func(self, results, *args, **kwargs)\n            elif isinstance(results['gt_bboxes'], np.ndarray):\n                results['gt_bboxes'] = box_type_cls(\n                    results['gt_bboxes'], clone=False)\n                if 'mix_results' in results:\n                    for res in results['mix_results']:\n                        if isinstance(res['gt_bboxes'], np.ndarray):\n                            res['gt_bboxes'] = box_type_cls(\n                                res['gt_bboxes'], clone=False)\n\n                _results = func(self, results, *args, **kwargs)\n\n                # In some cases, the function will process gt_bboxes in-place\n                # Simultaneously convert the input and output gt_bboxes\n                # back to np.ndarray\n                if isinstance(_results, dict) and 'gt_bboxes' in _results:\n                    if isinstance(_results['gt_bboxes'], BaseBoxes):\n                        _results['gt_bboxes'] = _results['gt_bboxes'].numpy()\n                if 
isinstance(results['gt_bboxes'], BaseBoxes):\n                    results['gt_bboxes'] = results['gt_bboxes'].numpy()\n                return _results\n            else:\n                raise TypeError(\n                    \"autocast_box_type requires results['gt_bboxes'] to \"\n                    'be BaseBoxes or np.ndarray, but got '\n                    f\"{type(results['gt_bboxes'])}\")\n\n        return wrapper\n\n    return decorator\n"
  },
  {
    "path": "mmdet/structures/bbox/horizontal_boxes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, TypeVar, Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch import BoolTensor, Tensor\n\nfrom mmdet.structures.mask.structures import BitmapMasks, PolygonMasks\nfrom .base_boxes import BaseBoxes\nfrom .bbox_overlaps import bbox_overlaps\nfrom .box_type import register_box\n\nT = TypeVar('T')\nDeviceType = Union[str, torch.device]\nMaskType = Union[BitmapMasks, PolygonMasks]\n\n\n@register_box(name='hbox')\nclass HorizontalBoxes(BaseBoxes):\n    \"\"\"The horizontal box class used in MMDetection by default.\n\n    The ``box_dim`` of ``HorizontalBoxes`` is 4, which means the length of\n    the last dimension of the data should be 4. Two modes of box data are\n    supported in ``HorizontalBoxes``:\n\n    - 'xyxy': Each row of data indicates (x1, y1, x2, y2), which are the\n      coordinates of the left-top and right-bottom points.\n    - 'cxcywh': Each row of data indicates (x, y, w, h), where (x, y) are the\n      coordinates of the box centers and (w, h) are the width and height.\n\n    ``HorizontalBoxes`` only restores 'xyxy' mode of data. If the the data is\n    in 'cxcywh' mode, users need to input ``in_mode='cxcywh'`` and The code\n    will convert the 'cxcywh' data to 'xyxy' automatically.\n\n    Args:\n        data (Tensor or np.ndarray or Sequence): The box data with shape of\n            (..., 4).\n        dtype (torch.dtype, Optional): data type of boxes. Defaults to None.\n        device (str or torch.device, Optional): device of boxes.\n            Default to None.\n        clone (bool): Whether clone ``boxes`` or not. Defaults to True.\n        mode (str, Optional): the mode of boxes. If it is 'cxcywh', the\n            `data` will be converted to 'xyxy' mode. 
Defaults to None.\n    \"\"\"\n\n    box_dim: int = 4\n\n    def __init__(self,\n                 data: Union[Tensor, np.ndarray],\n                 dtype: torch.dtype = None,\n                 device: DeviceType = None,\n                 clone: bool = True,\n                 in_mode: Optional[str] = None) -> None:\n        super().__init__(data=data, dtype=dtype, device=device, clone=clone)\n        if isinstance(in_mode, str):\n            if in_mode not in ('xyxy', 'cxcywh'):\n                raise ValueError(f'Get invalid mode {in_mode}.')\n            if in_mode == 'cxcywh':\n                self.tensor = self.cxcywh_to_xyxy(self.tensor)\n\n    @staticmethod\n    def cxcywh_to_xyxy(boxes: Tensor) -> Tensor:\n        \"\"\"Convert box coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).\n\n        Args:\n            boxes (Tensor): cxcywh boxes tensor with shape of (..., 4).\n\n        Returns:\n            Tensor: xyxy boxes tensor with shape of (..., 4).\n        \"\"\"\n        ctr, wh = boxes.split((2, 2), dim=-1)\n        return torch.cat([(ctr - wh / 2), (ctr + wh / 2)], dim=-1)\n\n    @staticmethod\n    def xyxy_to_cxcywh(boxes: Tensor) -> Tensor:\n        \"\"\"Convert box coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).\n\n        Args:\n            boxes (Tensor): xyxy boxes tensor with shape of (..., 4).\n\n        Returns:\n            Tensor: cxcywh boxes tensor with shape of (..., 4).\n        \"\"\"\n        xy1, xy2 = boxes.split((2, 2), dim=-1)\n        return torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1)\n\n    @property\n    def cxcywh(self) -> Tensor:\n        \"\"\"Return a tensor representing the cxcywh boxes.\"\"\"\n        return self.xyxy_to_cxcywh(self.tensor)\n\n    @property\n    def centers(self) -> Tensor:\n        \"\"\"Return a tensor representing the centers of boxes.\"\"\"\n        boxes = self.tensor\n        return (boxes[..., :2] + boxes[..., 2:]) / 2\n\n    @property\n    def areas(self) -> Tensor:\n        \"\"\"Return a tensor representing the areas of boxes.\"\"\"\n        boxes = self.tensor\n        return (boxes[..., 2] - boxes[..., 0]) * (\n            boxes[..., 3] - boxes[..., 1])\n\n    @property\n    def widths(self) -> Tensor:\n        \"\"\"Return a tensor representing the widths of boxes.\"\"\"\n        boxes = self.tensor\n        return boxes[..., 2] - boxes[..., 0]\n\n    @property\n    def heights(self) -> Tensor:\n        \"\"\"Return a tensor representing the heights of boxes.\"\"\"\n        boxes = self.tensor\n        return boxes[..., 3] - boxes[..., 1]\n\n    def flip_(self,\n              img_shape: Tuple[int, int],\n              direction: str = 'horizontal') -> None:\n        \"\"\"Flip boxes horizontally or vertically in-place.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n            direction (str): Flip direction, options are \"horizontal\",\n                \"vertical\" and \"diagonal\". 
Defaults to \"horizontal\"\n        \"\"\"\n        assert direction in ['horizontal', 'vertical', 'diagonal']\n        flipped = self.tensor\n        boxes = flipped.clone()\n        if direction == 'horizontal':\n            flipped[..., 0] = img_shape[1] - boxes[..., 2]\n            flipped[..., 2] = img_shape[1] - boxes[..., 0]\n        elif direction == 'vertical':\n            flipped[..., 1] = img_shape[0] - boxes[..., 3]\n            flipped[..., 3] = img_shape[0] - boxes[..., 1]\n        else:\n            flipped[..., 0] = img_shape[1] - boxes[..., 2]\n            flipped[..., 1] = img_shape[0] - boxes[..., 3]\n            flipped[..., 2] = img_shape[1] - boxes[..., 0]\n            flipped[..., 3] = img_shape[0] - boxes[..., 1]\n\n    def translate_(self, distances: Tuple[float, float]) -> None:\n        \"\"\"Translate boxes in-place.\n\n        Args:\n            distances (Tuple[float, float]): translate distances. The first\n                is horizontal distance and the second is vertical distance.\n        \"\"\"\n        boxes = self.tensor\n        assert len(distances) == 2\n        self.tensor = boxes + boxes.new_tensor(distances).repeat(2)\n\n    def clip_(self, img_shape: Tuple[int, int]) -> None:\n        \"\"\"Clip boxes according to the image shape in-place.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n        \"\"\"\n        boxes = self.tensor\n        boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1])\n        boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0])\n\n    def rotate_(self, center: Tuple[float, float], angle: float) -> None:\n        \"\"\"Rotate all boxes in-place.\n\n        Args:\n            center (Tuple[float, float]): Rotation origin.\n            angle (float): Rotation angle represented in degrees. 
Positive\n                values mean clockwise rotation.\n        \"\"\"\n        boxes = self.tensor\n        rotation_matrix = boxes.new_tensor(\n            cv2.getRotationMatrix2D(center, -angle, 1))\n\n        corners = self.hbox2corner(boxes)\n        corners = torch.cat(\n            [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)\n        corners_T = torch.transpose(corners, -1, -2)\n        corners_T = torch.matmul(rotation_matrix, corners_T)\n        corners = torch.transpose(corners_T, -1, -2)\n        self.tensor = self.corner2hbox(corners)\n\n    def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:\n        \"\"\"Geometric transformat boxes in-place.\n\n        Args:\n            homography_matrix (Tensor or np.ndarray]):\n                Shape (3, 3) for geometric transformation.\n        \"\"\"\n        boxes = self.tensor\n        if isinstance(homography_matrix, np.ndarray):\n            homography_matrix = boxes.new_tensor(homography_matrix)\n        corners = self.hbox2corner(boxes)\n        corners = torch.cat(\n            [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)\n        corners_T = torch.transpose(corners, -1, -2)\n        corners_T = torch.matmul(homography_matrix, corners_T)\n        corners = torch.transpose(corners_T, -1, -2)\n        # Convert to homogeneous coordinates by normalization\n        corners = corners[..., :2] / corners[..., 2:3]\n        self.tensor = self.corner2hbox(corners)\n\n    @staticmethod\n    def hbox2corner(boxes: Tensor) -> Tensor:\n        \"\"\"Convert box coordinates from (x1, y1, x2, y2) to corners ((x1, y1),\n        (x2, y1), (x1, y2), (x2, y2)).\n\n        Args:\n            boxes (Tensor): Horizontal box tensor with shape of (..., 4).\n\n        Returns:\n            Tensor: Corner tensor with shape of (..., 4, 2).\n        \"\"\"\n        x1, y1, x2, y2 = torch.split(boxes, 1, dim=-1)\n        corners = torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=-1)\n        return corners.reshape(*corners.shape[:-1], 4, 2)\n\n    @staticmethod\n    def corner2hbox(corners: Tensor) -> Tensor:\n        \"\"\"Convert box coordinates from corners ((x1, y1), (x2, y1), (x1, y2),\n        (x2, y2)) to (x1, y1, x2, y2).\n\n        Args:\n            corners (Tensor): Corner tensor with shape of (..., 4, 2).\n\n        Returns:\n            Tensor: Horizontal box tensor with shape of (..., 4).\n        \"\"\"\n        if corners.numel() == 0:\n            return corners.new_zeros((0, 4))\n        min_xy = corners.min(dim=-2)[0]\n        max_xy = corners.max(dim=-2)[0]\n        return torch.cat([min_xy, max_xy], dim=-1)\n\n    def rescale_(self, scale_factor: Tuple[float, float]) -> None:\n        \"\"\"Rescale boxes w.r.t. rescale_factor in-place.\n\n        Note:\n            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n            w.r.t ``scale_facotr``. 
The difference is that ``resize_`` only\n            changes the width and the height of boxes, but ``rescale_`` also\n            rescales the box centers simultaneously.\n\n        Args:\n            scale_factor (Tuple[float, float]): factors for scaling boxes.\n                The length should be 2.\n        \"\"\"\n        boxes = self.tensor\n        assert len(scale_factor) == 2\n        scale_factor = boxes.new_tensor(scale_factor).repeat(2)\n        self.tensor = boxes * scale_factor\n\n    def resize_(self, scale_factor: Tuple[float, float]) -> None:\n        \"\"\"Resize the box width and height w.r.t scale_factor in-place.\n\n        Note:\n            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n            w.r.t ``scale_facotr``. The difference is that ``resize_`` only\n            changes the width and the height of boxes, but ``rescale_`` also\n            rescales the box centers simultaneously.\n\n        Args:\n            scale_factor (Tuple[float, float]): factors for scaling box\n                shapes. The length should be 2.\n        \"\"\"\n        boxes = self.tensor\n        assert len(scale_factor) == 2\n        ctrs = (boxes[..., 2:] + boxes[..., :2]) / 2\n        wh = boxes[..., 2:] - boxes[..., :2]\n        scale_factor = boxes.new_tensor(scale_factor)\n        wh = wh * scale_factor\n        xy1 = ctrs - 0.5 * wh\n        xy2 = ctrs + 0.5 * wh\n        self.tensor = torch.cat([xy1, xy2], dim=-1)\n\n    def is_inside(self,\n                  img_shape: Tuple[int, int],\n                  all_inside: bool = False,\n                  allowed_border: int = 0) -> BoolTensor:\n        \"\"\"Find boxes inside the image.\n\n        Args:\n            img_shape (Tuple[int, int]): A tuple of image height and width.\n            all_inside (bool): Whether the boxes are all inside the image or\n                part inside the image. Defaults to False.\n            allowed_border (int): Boxes that extend beyond the image shape\n                boundary by more than ``allowed_border`` are considered\n                \"outside\" Defaults to 0.\n        Returns:\n            BoolTensor: A BoolTensor indicating whether the box is inside\n            the image. Assuming the original boxes have shape (m, n, 4),\n            the output has shape (m, n).\n        \"\"\"\n        img_h, img_w = img_shape\n        boxes = self.tensor\n        if all_inside:\n            return (boxes[:, 0] >= -allowed_border) & \\\n                (boxes[:, 1] >= -allowed_border) & \\\n                (boxes[:, 2] < img_w + allowed_border) & \\\n                (boxes[:, 3] < img_h + allowed_border)\n        else:\n            return (boxes[..., 0] < img_w + allowed_border) & \\\n                (boxes[..., 1] < img_h + allowed_border) & \\\n                (boxes[..., 2] > -allowed_border) & \\\n                (boxes[..., 3] > -allowed_border)\n\n    def find_inside_points(self,\n                           points: Tensor,\n                           is_aligned: bool = False) -> BoolTensor:\n        \"\"\"Find inside box points. Boxes dimension must be 2.\n\n        Args:\n            points (Tensor): Points coordinates. Has shape of (m, 2).\n            is_aligned (bool): Whether ``points`` has been aligned with boxes\n                or not. If True, the length of boxes and ``points`` should be\n                the same. Defaults to False.\n\n        Returns:\n            BoolTensor: A BoolTensor indicating whether a point is inside\n            boxes. 
Assuming the boxes has shape of (n, 4), if ``is_aligned``\n            is False. The index has shape of (m, n). If ``is_aligned`` is\n            True, m should be equal to n and the index has shape of (m, ).\n        \"\"\"\n        boxes = self.tensor\n        assert boxes.dim() == 2, 'boxes dimension must be 2.'\n\n        if not is_aligned:\n            boxes = boxes[None, :, :]\n            points = points[:, None, :]\n        else:\n            assert boxes.size(0) == points.size(0)\n\n        x_min, y_min, x_max, y_max = boxes.unbind(dim=-1)\n        return (points[..., 0] >= x_min) & (points[..., 0] <= x_max) & \\\n            (points[..., 1] >= y_min) & (points[..., 1] <= y_max)\n\n    @staticmethod\n    def overlaps(boxes1: BaseBoxes,\n                 boxes2: BaseBoxes,\n                 mode: str = 'iou',\n                 is_aligned: bool = False,\n                 eps: float = 1e-6) -> Tensor:\n        \"\"\"Calculate overlap between two set of boxes with their types\n        converted to ``HorizontalBoxes``.\n\n        Args:\n            boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)\n                or empty.\n            boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)\n                or empty.\n            mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n                over foreground). Defaults to \"iou\".\n            is_aligned (bool): If True, then m and n must be equal. Defaults\n                to False.\n            eps (float): A value added to the denominator for numerical\n                stability. Defaults to 1e-6.\n\n        Returns:\n            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n        \"\"\"\n        boxes1 = boxes1.convert_to('hbox')\n        boxes2 = boxes2.convert_to('hbox')\n        return bbox_overlaps(\n            boxes1.tensor,\n            boxes2.tensor,\n            mode=mode,\n            is_aligned=is_aligned,\n            eps=eps)\n\n    @staticmethod\n    def from_instance_masks(masks: MaskType) -> 'HorizontalBoxes':\n        \"\"\"Create horizontal boxes from instance masks.\n\n        Args:\n            masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or\n                PolygonMasks instance with length of n.\n\n        Returns:\n            :obj:`HorizontalBoxes`: Converted boxes with shape of (n, 4).\n        \"\"\"\n        num_masks = len(masks)\n        boxes = np.zeros((num_masks, 4), dtype=np.float32)\n        if isinstance(masks, BitmapMasks):\n            x_any = masks.masks.any(axis=1)\n            y_any = masks.masks.any(axis=2)\n            for idx in range(num_masks):\n                x = np.where(x_any[idx, :])[0]\n                y = np.where(y_any[idx, :])[0]\n                if len(x) > 0 and len(y) > 0:\n                    # use +1 for x_max and y_max so that the right and bottom\n                    # boundary of instance masks are fully included by the box\n                    boxes[idx, :] = np.array(\n                        [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32)\n        elif isinstance(masks, PolygonMasks):\n            for idx, poly_per_obj in enumerate(masks.masks):\n                # simply use a number that is big enough for comparison with\n                # coordinates\n                xy_min = np.array([masks.width * 2, masks.height * 2],\n                                  dtype=np.float32)\n                xy_max = np.zeros(2, dtype=np.float32)\n                for p in poly_per_obj:\n          
          xy = np.array(p).reshape(-1, 2).astype(np.float32)\n                    xy_min = np.minimum(xy_min, np.min(xy, axis=0))\n                    xy_max = np.maximum(xy_max, np.max(xy, axis=0))\n                boxes[idx, :2] = xy_min\n                boxes[idx, 2:] = xy_max\n        else:\n            raise TypeError(\n                '`masks` must be `BitmapMasks`  or `PolygonMasks`, '\n                f'but got {type(masks)}.')\n        return HorizontalBoxes(boxes)\n"
  },
  {
    "path": "mmdet/structures/bbox/transforms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.structures.bbox import BaseBoxes\n\n\ndef find_inside_bboxes(bboxes: Tensor, img_h: int, img_w: int) -> Tensor:\n    \"\"\"Find bboxes as long as a part of bboxes is inside the image.\n\n    Args:\n        bboxes (Tensor): Shape (N, 4).\n        img_h (int): Image height.\n        img_w (int): Image width.\n\n    Returns:\n        Tensor: Index of the remaining bboxes.\n    \"\"\"\n    inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \\\n        & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0)\n    return inside_inds\n\n\ndef bbox_flip(bboxes: Tensor,\n              img_shape: Tuple[int],\n              direction: str = 'horizontal') -> Tensor:\n    \"\"\"Flip bboxes horizontally or vertically.\n\n    Args:\n        bboxes (Tensor): Shape (..., 4*k)\n        img_shape (Tuple[int]): Image shape.\n        direction (str): Flip direction, options are \"horizontal\", \"vertical\",\n            \"diagonal\". Default: \"horizontal\"\n\n    Returns:\n        Tensor: Flipped bboxes.\n    \"\"\"\n    assert bboxes.shape[-1] % 4 == 0\n    assert direction in ['horizontal', 'vertical', 'diagonal']\n    flipped = bboxes.clone()\n    if direction == 'horizontal':\n        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]\n        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]\n    elif direction == 'vertical':\n        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]\n        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]\n    else:\n        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]\n        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]\n        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]\n        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]\n    return flipped\n\n\ndef bbox_mapping(bboxes: Tensor,\n                 img_shape: Tuple[int],\n                 scale_factor: Union[float, Tuple[float]],\n                 flip: bool,\n                 flip_direction: str = 'horizontal') -> Tensor:\n    \"\"\"Map bboxes from the original image scale to testing scale.\"\"\"\n    new_bboxes = bboxes * bboxes.new_tensor(scale_factor)\n    if flip:\n        new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)\n    return new_bboxes\n\n\ndef bbox_mapping_back(bboxes: Tensor,\n                      img_shape: Tuple[int],\n                      scale_factor: Union[float, Tuple[float]],\n                      flip: bool,\n                      flip_direction: str = 'horizontal') -> Tensor:\n    \"\"\"Map bboxes from testing scale to original image scale.\"\"\"\n    new_bboxes = bbox_flip(bboxes, img_shape,\n                           flip_direction) if flip else bboxes\n    new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)\n    return new_bboxes.view(bboxes.shape)\n\n\ndef bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor:\n    \"\"\"Convert a list of bboxes to roi format.\n\n    Args:\n        bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes\n            corresponding to a batch of images.\n\n    Returns:\n        Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the\n        different box types. For example, If the box type in ``bbox_list``\n        is HorizontalBoxes, the output shape is (n, 5). 
Each row of data\n        indicates [batch_ind, x1, y1, x2, y2].\n    \"\"\"\n    rois_list = []\n    for img_id, bboxes in enumerate(bbox_list):\n        bboxes = get_box_tensor(bboxes)\n        img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)\n        rois = torch.cat([img_inds, bboxes], dim=-1)\n        rois_list.append(rois)\n    rois = torch.cat(rois_list, 0)\n    return rois\n\n\ndef roi2bbox(rois: Tensor) -> List[Tensor]:\n    \"\"\"Convert rois to bounding box format.\n\n    Args:\n        rois (Tensor): RoIs with the shape (n, 5) where the first\n            column indicates batch id of each RoI.\n\n    Returns:\n        List[Tensor]: Converted boxes of corresponding rois.\n    \"\"\"\n    bbox_list = []\n    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)\n    for img_id in img_ids:\n        inds = (rois[:, 0] == img_id.item())\n        bbox = rois[inds, 1:]\n        bbox_list.append(bbox)\n    return bbox_list\n\n\n# TODO remove later\ndef bbox2result(bboxes: Union[Tensor, np.ndarray], labels: Union[Tensor,\n                                                                 np.ndarray],\n                num_classes: int) -> List[np.ndarray]:\n    \"\"\"Convert detection results to a list of numpy arrays.\n\n    Args:\n        bboxes (Tensor | np.ndarray): shape (n, 5)\n        labels (Tensor | np.ndarray): shape (n, )\n        num_classes (int): class number, including background class\n\n    Returns:\n        List(np.ndarray]): bbox results of each class\n    \"\"\"\n    if bboxes.shape[0] == 0:\n        return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]\n    else:\n        if isinstance(bboxes, torch.Tensor):\n            bboxes = bboxes.detach().cpu().numpy()\n            labels = labels.detach().cpu().numpy()\n        return [bboxes[labels == i, :] for i in range(num_classes)]\n\n\ndef distance2bbox(\n    points: Tensor,\n    distance: Tensor,\n    max_shape: Optional[Union[Sequence[int], Tensor,\n                              Sequence[Sequence[int]]]] = None\n) -> Tensor:\n    \"\"\"Decode distance prediction to bounding box.\n\n    Args:\n        points (Tensor): Shape (B, N, 2) or (N, 2).\n        distance (Tensor): Distance from the given point to 4\n            boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)\n        max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]],\n            optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). 
If priors shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B.\n\n    Returns:\n        Tensor: Boxes with shape (N, 4) or (B, N, 4)\n    \"\"\"\n\n    x1 = points[..., 0] - distance[..., 0]\n    y1 = points[..., 1] - distance[..., 1]\n    x2 = points[..., 0] + distance[..., 2]\n    y2 = points[..., 1] + distance[..., 3]\n\n    bboxes = torch.stack([x1, y1, x2, y2], -1)\n\n    if max_shape is not None:\n        if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():\n            # speed up\n            bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])\n            bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])\n            return bboxes\n\n        # clip bboxes with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            # TODO: delete\n            from mmdet.core.export import dynamic_clip_for_onnx\n            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n            bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = x1.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(x1)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = x1.new_tensor(0)\n        max_xy = torch.cat([max_shape, max_shape],\n                           dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n\n\ndef bbox2distance(points: Tensor,\n                  bbox: Tensor,\n                  max_dis: Optional[float] = None,\n                  eps: float = 0.1) -> Tensor:\n    \"\"\"Decode bounding box based on distances.\n\n    Args:\n        points (Tensor): Shape (n, 2) or (b, n, 2), [x, y].\n        bbox (Tensor): Shape (n, 4) or (b, n, 4), \"xyxy\" format\n        max_dis (float, optional): Upper bound of the distance.\n        eps (float): a small value to ensure target < max_dis, instead <=\n\n    Returns:\n        Tensor: Decoded distances.\n    \"\"\"\n    left = points[..., 0] - bbox[..., 0]\n    top = points[..., 1] - bbox[..., 1]\n    right = bbox[..., 2] - points[..., 0]\n    bottom = bbox[..., 3] - points[..., 1]\n    if max_dis is not None:\n        left = left.clamp(min=0, max=max_dis - eps)\n        top = top.clamp(min=0, max=max_dis - eps)\n        right = right.clamp(min=0, max=max_dis - eps)\n        bottom = bottom.clamp(min=0, max=max_dis - eps)\n    return torch.stack([left, top, right, bottom], -1)\n\n\ndef bbox_rescale(bboxes: Tensor, scale_factor: float = 1.0) -> Tensor:\n    \"\"\"Rescale bounding box w.r.t. 
scale_factor.\n\n    Args:\n        bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois\n        scale_factor (float): rescale factor\n\n    Returns:\n        Tensor: Rescaled bboxes.\n    \"\"\"\n    if bboxes.size(1) == 5:\n        bboxes_ = bboxes[:, 1:]\n        inds_ = bboxes[:, 0]\n    else:\n        bboxes_ = bboxes\n    cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5\n    cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5\n    w = bboxes_[:, 2] - bboxes_[:, 0]\n    h = bboxes_[:, 3] - bboxes_[:, 1]\n    w = w * scale_factor\n    h = h * scale_factor\n    x1 = cx - 0.5 * w\n    x2 = cx + 0.5 * w\n    y1 = cy - 0.5 * h\n    y2 = cy + 0.5 * h\n    if bboxes.size(1) == 5:\n        rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)\n    else:\n        rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n    return rescaled_bboxes\n\n\ndef bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor:\n    \"\"\"Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).\n\n    Args:\n        bbox (Tensor): Shape (n, 4) for bboxes.\n\n    Returns:\n        Tensor: Converted bboxes.\n    \"\"\"\n    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)\n    bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]\n    return torch.cat(bbox_new, dim=-1)\n\n\ndef bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor:\n    \"\"\"Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).\n\n    Args:\n        bbox (Tensor): Shape (n, 4) for bboxes.\n\n    Returns:\n        Tensor: Converted bboxes.\n    \"\"\"\n    x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)\n    bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]\n    return torch.cat(bbox_new, dim=-1)\n\n\ndef bbox2corner(bboxes: torch.Tensor) -> torch.Tensor:\n    \"\"\"Convert bbox coordinates from (x1, y1, x2, y2) to corners ((x1, y1),\n    (x2, y1), (x1, y2), (x2, y2)).\n\n    Args:\n        bboxes (Tensor): Shape (n, 4) for bboxes.\n    Returns:\n        Tensor: Shape (n*4, 2) for corners.\n    \"\"\"\n    x1, y1, x2, y2 = torch.split(bboxes, 1, dim=1)\n    return torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=1).reshape(-1, 2)\n\n\ndef corner2bbox(corners: torch.Tensor) -> torch.Tensor:\n    \"\"\"Convert bbox coordinates from corners ((x1, y1), (x2, y1), (x1, y2),\n    (x2, y2)) to (x1, y1, x2, y2).\n\n    Args:\n        corners (Tensor): Shape (n*4, 2) for corners.\n    Returns:\n        Tensor: Shape (n, 4) for bboxes.\n    \"\"\"\n    corners = corners.reshape(-1, 4, 2)\n    min_xy = corners.min(dim=1)[0]\n    max_xy = corners.max(dim=1)[0]\n    return torch.cat([min_xy, max_xy], dim=1)\n\n\ndef bbox_project(\n    bboxes: Union[torch.Tensor, np.ndarray],\n    homography_matrix: Union[torch.Tensor, np.ndarray],\n    img_shape: Optional[Tuple[int, int]] = None\n) -> Union[torch.Tensor, np.ndarray]:\n    \"\"\"Geometric transformation for bbox.\n\n    Args:\n        bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes.\n        homography_matrix (Union[torch.Tensor, np.ndarray]):\n            Shape (3, 3) for geometric transformation.\n        img_shape (Tuple[int, int], optional): Image shape. 
Defaults to None.\n    Returns:\n        Union[torch.Tensor, np.ndarray]: Converted bboxes.\n    \"\"\"\n    bboxes_type = type(bboxes)\n    if bboxes_type is np.ndarray:\n        bboxes = torch.from_numpy(bboxes)\n    if isinstance(homography_matrix, np.ndarray):\n        homography_matrix = torch.from_numpy(homography_matrix)\n    corners = bbox2corner(bboxes)\n    corners = torch.cat(\n        [corners, corners.new_ones(corners.shape[0], 1)], dim=1)\n    corners = torch.matmul(homography_matrix, corners.t()).t()\n    # Convert to homogeneous coordinates by normalization\n    corners = corners[:, :2] / corners[:, 2:3]\n    bboxes = corner2bbox(corners)\n    if img_shape is not None:\n        bboxes[:, 0::2] = bboxes[:, 0::2].clamp(0, img_shape[1])\n        bboxes[:, 1::2] = bboxes[:, 1::2].clamp(0, img_shape[0])\n    if bboxes_type is np.ndarray:\n        bboxes = bboxes.numpy()\n    return bboxes\n\n\ndef cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n              dim: int = 0) -> Union[Tensor, BaseBoxes]:\n    \"\"\"Concatenate boxes with type of tensor or box type.\n\n    Args:\n        data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n            or box types need to be concatenated.\n            dim (int): The dimension over which the box are concatenated.\n                Defaults to 0.\n\n    Returns:\n        Union[Tensor, :obj`BaseBoxes`]: Concatenated results.\n    \"\"\"\n    if data_list and isinstance(data_list[0], BaseBoxes):\n        return data_list[0].cat(data_list, dim=dim)\n    else:\n        return torch.cat(data_list, dim=dim)\n\n\ndef stack_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n                dim: int = 0) -> Union[Tensor, BaseBoxes]:\n    \"\"\"Stack boxes with type of tensor or box type.\n\n    Args:\n        data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n            or box types need to be stacked.\n            dim (int): The dimension over which the box are stacked.\n                Defaults to 0.\n\n    Returns:\n        Union[Tensor, :obj`BaseBoxes`]: Stacked results.\n    \"\"\"\n    if data_list and isinstance(data_list[0], BaseBoxes):\n        return data_list[0].stack(data_list, dim=dim)\n    else:\n        return torch.stack(data_list, dim=dim)\n\n\ndef scale_boxes(boxes: Union[Tensor, BaseBoxes],\n                scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]:\n    \"\"\"Scale boxes with type of tensor or box type.\n\n    Args:\n        boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. 
Its type\n            can be a tensor or a box type.\n        scale_factor (Tuple[float, float]): factors for scaling boxes.\n            The length should be 2.\n\n    Returns:\n        Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes.\n    \"\"\"\n    if isinstance(boxes, BaseBoxes):\n        boxes.rescale_(scale_factor)\n        return boxes\n    else:\n        # Tensor boxes will be treated as horizontal boxes\n        repeat_num = int(boxes.size(-1) / 2)\n        scale_factor = boxes.new_tensor(scale_factor).repeat((1, repeat_num))\n        return boxes * scale_factor\n\n\ndef get_box_wh(boxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor, Tensor]:\n    \"\"\"Get the width and height of boxes with type of tensor or box type.\n\n    Args:\n        boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor\n            or box type.\n\n    Returns:\n        Tuple[Tensor, Tensor]: the width and height of boxes.\n    \"\"\"\n    if isinstance(boxes, BaseBoxes):\n        w = boxes.widths\n        h = boxes.heights\n    else:\n        # Tensor boxes will be treated as horizontal boxes by defaults\n        w = boxes[:, 2] - boxes[:, 0]\n        h = boxes[:, 3] - boxes[:, 1]\n    return w, h\n\n\ndef get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor:\n    \"\"\"Get tensor data from box type boxes.\n\n    Args:\n        boxes (Tensor or BaseBoxes): boxes with type of tensor or box type.\n            If its type is a tensor, the boxes will be directly returned.\n            If its type is a box type, the `boxes.tensor` will be returned.\n\n    Returns:\n        Tensor: boxes tensor.\n    \"\"\"\n    if isinstance(boxes, BaseBoxes):\n        boxes = boxes.tensor\n    return boxes\n\n\ndef empty_box_as(boxes: Union[Tensor, BaseBoxes]) -> Union[Tensor, BaseBoxes]:\n    \"\"\"Generate empty box according to input ``boxes` type and device.\n\n    Args:\n        boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor\n            or box type.\n\n    Returns:\n        Union[Tensor, BaseBoxes]: Generated empty box.\n    \"\"\"\n    if isinstance(boxes, BaseBoxes):\n        return boxes.empty_boxes()\n    else:\n        # Tensor boxes will be treated as horizontal boxes by defaults\n        return boxes.new_zeros(0, 4)\n"
  },
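  {
    "path": "examples/hypothetical_bbox_transforms_sketch.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not part\n# of the upstream repository. It exercises a few helpers defined in\n# mmdet/structures/bbox/transforms.py above. The public import path\n# `mmdet.structures.bbox` and the sample values are assumptions.\nimport torch\n\nfrom mmdet.structures.bbox import (bbox2roi, bbox_cxcywh_to_xyxy, bbox_flip,\n                                   bbox_xyxy_to_cxcywh)\n\n# Two boxes in (x1, y1, x2, y2) format on an image of shape (h, w) = (100, 200).\nbboxes = torch.tensor([[10., 20., 50., 60.], [0., 0., 30., 40.]])\n\n# Round-trip between (x1, y1, x2, y2) and (cx, cy, w, h).\ncxcywh = bbox_xyxy_to_cxcywh(bboxes)\nassert torch.allclose(bbox_cxcywh_to_xyxy(cxcywh), bboxes)\n\n# A horizontal flip mirrors the x-coordinates around the image width.\nflipped = bbox_flip(bboxes, img_shape=(100, 200), direction='horizontal')\nassert torch.allclose(flipped[:, 0], 200 - bboxes[:, 2])\n\n# Pack per-image boxes into RoIs of shape (n, 5); column 0 is the batch index.\nrois = bbox2roi([bboxes, bboxes[:1]])\nassert rois.shape == (3, 5)\nassert rois[:2, 0].eq(0).all() and rois[2, 0] == 1\n"
  },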
  {
    "path": "mmdet/structures/det_data_sample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Optional\n\nfrom mmengine.structures import BaseDataElement, InstanceData, PixelData\n\n\nclass DetDataSample(BaseDataElement):\n    \"\"\"A data structure interface of MMDetection. They are used as interfaces\n    between different components.\n\n    The attributes in ``DetDataSample`` are divided into several parts:\n\n        - ``proposals``(InstanceData): Region proposals used in two-stage\n            detectors.\n        - ``gt_instances``(InstanceData): Ground truth of instance annotations.\n        - ``pred_instances``(InstanceData): Instances of model predictions.\n        - ``ignored_instances``(InstanceData): Instances to be ignored during\n            training/testing.\n        - ``gt_panoptic_seg``(PixelData): Ground truth of panoptic\n            segmentation.\n        - ``pred_panoptic_seg``(PixelData): Prediction of panoptic\n           segmentation.\n        - ``gt_sem_seg``(PixelData): Ground truth of semantic segmentation.\n        - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation.\n\n    Examples:\n         >>> import torch\n         >>> import numpy as np\n         >>> from mmengine.structures import InstanceData\n         >>> from mmdet.structures import DetDataSample\n\n         >>> data_sample = DetDataSample()\n         >>> img_meta = dict(img_shape=(800, 1196, 3),\n         ...                 pad_shape=(800, 1216, 3))\n         >>> gt_instances = InstanceData(metainfo=img_meta)\n         >>> gt_instances.bboxes = torch.rand((5, 4))\n         >>> gt_instances.labels = torch.rand((5,))\n         >>> data_sample.gt_instances = gt_instances\n         >>> assert 'img_shape' in data_sample.gt_instances.metainfo_keys()\n         >>> len(data_sample.gt_instances)\n         5\n         >>> print(data_sample)\n        <DetDataSample(\n\n            META INFORMATION\n\n            DATA FIELDS\n            gt_instances: <InstanceData(\n\n                    META INFORMATION\n                    pad_shape: (800, 1216, 3)\n                    img_shape: (800, 1196, 3)\n\n                    DATA FIELDS\n                    labels: tensor([0.8533, 0.1550, 0.5433, 0.7294, 0.5098])\n                    bboxes:\n                    tensor([[9.7725e-01, 5.8417e-01, 1.7269e-01, 6.5694e-01],\n                            [1.7894e-01, 5.1780e-01, 7.0590e-01, 4.8589e-01],\n                            [7.0392e-01, 6.6770e-01, 1.7520e-01, 1.4267e-01],\n                            [2.2411e-01, 5.1962e-01, 9.6953e-01, 6.6994e-01],\n                            [4.1338e-01, 2.1165e-01, 2.7239e-04, 6.8477e-01]])\n                ) at 0x7f21fb1b9190>\n        ) at 0x7f21fb1b9880>\n         >>> pred_instances = InstanceData(metainfo=img_meta)\n         >>> pred_instances.bboxes = torch.rand((5, 4))\n         >>> pred_instances.scores = torch.rand((5,))\n         >>> data_sample = DetDataSample(pred_instances=pred_instances)\n         >>> assert 'pred_instances' in data_sample\n\n         >>> data_sample = DetDataSample()\n         >>> gt_instances_data = dict(\n         ...                        bboxes=torch.rand(2, 4),\n         ...                        labels=torch.rand(2),\n         ...                        
masks=np.random.rand(2, 2, 2))\n         >>> gt_instances = InstanceData(**gt_instances_data)\n         >>> data_sample.gt_instances = gt_instances\n         >>> assert 'gt_instances' in data_sample\n         >>> assert 'masks' in data_sample.gt_instances\n\n         >>> data_sample = DetDataSample()\n         >>> gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(2, 4))\n         >>> gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)\n         >>> data_sample.gt_panoptic_seg = gt_panoptic_seg\n         >>> print(data_sample)\n        <DetDataSample(\n\n            META INFORMATION\n\n            DATA FIELDS\n            _gt_panoptic_seg: <BaseDataElement(\n\n                    META INFORMATION\n\n                    DATA FIELDS\n                    panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],\n                                [0.3200, 0.7448, 0.1052, 0.5371]])\n                ) at 0x7f66c2bb7730>\n            gt_panoptic_seg: <BaseDataElement(\n\n                    META INFORMATION\n\n                    DATA FIELDS\n                    panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],\n                                [0.3200, 0.7448, 0.1052, 0.5371]])\n                ) at 0x7f66c2bb7730>\n        ) at 0x7f66c2bb7280>\n        >>> data_sample = DetDataSample()\n        >>> gt_segm_seg_data = dict(segm_seg=torch.rand(2, 2, 2))\n        >>> gt_segm_seg = PixelData(**gt_segm_seg_data)\n        >>> data_sample.gt_segm_seg = gt_segm_seg\n        >>> assert 'gt_segm_seg' in data_sample\n        >>> assert 'segm_seg' in data_sample.gt_segm_seg\n    \"\"\"\n\n    @property\n    def proposals(self) -> InstanceData:\n        return self._proposals\n\n    @proposals.setter\n    def proposals(self, value: InstanceData):\n        self.set_field(value, '_proposals', dtype=InstanceData)\n\n    @proposals.deleter\n    def proposals(self):\n        del self._proposals\n\n    @property\n    def gt_instances(self) -> InstanceData:\n        return self._gt_instances\n\n    @gt_instances.setter\n    def gt_instances(self, value: InstanceData):\n        self.set_field(value, '_gt_instances', dtype=InstanceData)\n\n    @gt_instances.deleter\n    def gt_instances(self):\n        del self._gt_instances\n\n    @property\n    def pred_instances(self) -> InstanceData:\n        return self._pred_instances\n\n    @pred_instances.setter\n    def pred_instances(self, value: InstanceData):\n        self.set_field(value, '_pred_instances', dtype=InstanceData)\n\n    @pred_instances.deleter\n    def pred_instances(self):\n        del self._pred_instances\n\n    @property\n    def ignored_instances(self) -> InstanceData:\n        return self._ignored_instances\n\n    @ignored_instances.setter\n    def ignored_instances(self, value: InstanceData):\n        self.set_field(value, '_ignored_instances', dtype=InstanceData)\n\n    @ignored_instances.deleter\n    def ignored_instances(self):\n        del self._ignored_instances\n\n    @property\n    def gt_panoptic_seg(self) -> PixelData:\n        return self._gt_panoptic_seg\n\n    @gt_panoptic_seg.setter\n    def gt_panoptic_seg(self, value: PixelData):\n        self.set_field(value, '_gt_panoptic_seg', dtype=PixelData)\n\n    @gt_panoptic_seg.deleter\n    def gt_panoptic_seg(self):\n        del self._gt_panoptic_seg\n\n    @property\n    def pred_panoptic_seg(self) -> PixelData:\n        return self._pred_panoptic_seg\n\n    @pred_panoptic_seg.setter\n    def pred_panoptic_seg(self, value: PixelData):\n        self.set_field(value, '_pred_panoptic_seg', 
dtype=PixelData)\n\n    @pred_panoptic_seg.deleter\n    def pred_panoptic_seg(self):\n        del self._pred_panoptic_seg\n\n    @property\n    def gt_sem_seg(self) -> PixelData:\n        return self._gt_sem_seg\n\n    @gt_sem_seg.setter\n    def gt_sem_seg(self, value: PixelData):\n        self.set_field(value, '_gt_sem_seg', dtype=PixelData)\n\n    @gt_sem_seg.deleter\n    def gt_sem_seg(self):\n        del self._gt_sem_seg\n\n    @property\n    def pred_sem_seg(self) -> PixelData:\n        return self._pred_sem_seg\n\n    @pred_sem_seg.setter\n    def pred_sem_seg(self, value: PixelData):\n        self.set_field(value, '_pred_sem_seg', dtype=PixelData)\n\n    @pred_sem_seg.deleter\n    def pred_sem_seg(self):\n        del self._pred_sem_seg\n\n\nSampleList = List[DetDataSample]\nOptSampleList = Optional[SampleList]\n"
  },
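  {
    "path": "examples/hypothetical_det_data_sample_sketch.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not part\n# of the upstream repository. It shows the typed fields of DetDataSample from\n# mmdet/structures/det_data_sample.py above and the SampleList alias; the\n# import path `mmdet.structures` is assumed from the package layout.\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.structures import DetDataSample, SampleList\n\ndata_sample = DetDataSample(metainfo=dict(img_shape=(800, 1196, 3)))\n\n# Every field is a property backed by `set_field` with an enforced dtype, so\n# predictions have to be wrapped in an InstanceData before assignment.\npred_instances = InstanceData()\npred_instances.bboxes = torch.rand(3, 4)\npred_instances.scores = torch.rand(3)\ndata_sample.pred_instances = pred_instances\nassert 'pred_instances' in data_sample\nassert len(data_sample.pred_instances) == 3\n\n# SampleList is just List[DetDataSample]; batched APIs pass it around.\nbatch: SampleList = [data_sample, DetDataSample()]\nassert len(batch) == 2\n"
  },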
  {
    "path": "mmdet/structures/mask/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .mask_target import mask_target\nfrom .structures import (BaseInstanceMasks, BitmapMasks, PolygonMasks,\n                         bitmap_to_polygon, polygon_to_bitmap)\nfrom .utils import encode_mask_results, mask2bbox, split_combined_polys\n\n__all__ = [\n    'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',\n    'PolygonMasks', 'encode_mask_results', 'mask2bbox', 'polygon_to_bitmap',\n    'bitmap_to_polygon'\n]\n"
  },
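  {
    "path": "examples/hypothetical_mask_structures_sketch.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not part\n# of the upstream repository. It exercises PolygonMasks/BitmapMasks, which are\n# exported by the package __init__ above and defined in\n# mmdet/structures/mask/structures.py further below; import paths are assumed.\nimport numpy as np\n\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\n# One object made of a single 10x10 square polygon on a 16x16 canvas.\nsquare = np.array([0., 0., 10., 0., 10., 10., 0., 10., 0., 0.])\npolys = PolygonMasks([[square]], height=16, width=16)\n\n# `areas` uses the shoelace formula, so the square has area 100.\nassert np.allclose(polys.areas, [100.0])\n\n# Geometric ops return new mask containers of the same kind.\nflipped = polys.flip('horizontal')\nassert isinstance(flipped, PolygonMasks) and len(flipped) == 1\n\n# Rasterise the polygons into a BitmapMasks of shape (num_masks, H, W).\nbitmaps = polys.to_bitmap()\nassert isinstance(bitmaps, BitmapMasks)\nassert bitmaps.masks.shape == (1, 16, 16)\n"
  },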
  {
    "path": "mmdet/structures/mask/mask_target.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\n\ndef mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,\n                cfg):\n    \"\"\"Compute mask target for positive proposals in multiple images.\n\n    Args:\n        pos_proposals_list (list[Tensor]): Positive proposals in multiple\n            images, each has shape (num_pos, 4).\n        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each\n            positive proposals, each has shape (num_pos,).\n        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of\n            each image.\n        cfg (dict): Config dict that specifies the mask size.\n\n    Returns:\n        Tensor: Mask target of each image, has shape (num_pos, w, h).\n\n    Example:\n        >>> from mmengine.config import Config\n        >>> import mmdet\n        >>> from mmdet.data_elements.mask import BitmapMasks\n        >>> from mmdet.data_elements.mask.mask_target import *\n        >>> H, W = 17, 18\n        >>> cfg = Config({'mask_size': (13, 14)})\n        >>> rng = np.random.RandomState(0)\n        >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image\n        >>> pos_proposals_list = [\n        >>>     torch.Tensor([\n        >>>         [ 7.2425,  5.5929, 13.9414, 14.9541],\n        >>>         [ 7.3241,  3.6170, 16.3850, 15.3102],\n        >>>     ]),\n        >>>     torch.Tensor([\n        >>>         [ 4.8448, 6.4010, 7.0314, 9.7681],\n        >>>         [ 5.9790, 2.6989, 7.4416, 4.8580],\n        >>>         [ 0.0000, 0.0000, 0.1398, 9.8232],\n        >>>     ]),\n        >>> ]\n        >>> # Corresponding class index for each proposal for each image\n        >>> pos_assigned_gt_inds_list = [\n        >>>     torch.LongTensor([7, 0]),\n        >>>     torch.LongTensor([5, 4, 1]),\n        >>> ]\n        >>> # Ground truth mask for each true object for each image\n        >>> gt_masks_list = [\n        >>>     BitmapMasks(rng.rand(8, H, W), height=H, width=W),\n        >>>     BitmapMasks(rng.rand(6, H, W), height=H, width=W),\n        >>> ]\n        >>> mask_targets = mask_target(\n        >>>     pos_proposals_list, pos_assigned_gt_inds_list,\n        >>>     gt_masks_list, cfg)\n        >>> assert mask_targets.shape == (5,) + cfg['mask_size']\n    \"\"\"\n    cfg_list = [cfg for _ in range(len(pos_proposals_list))]\n    mask_targets = map(mask_target_single, pos_proposals_list,\n                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)\n    mask_targets = list(mask_targets)\n    if len(mask_targets) > 0:\n        mask_targets = torch.cat(mask_targets)\n    return mask_targets\n\n\ndef mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):\n    \"\"\"Compute mask target for each positive proposal in the image.\n\n    Args:\n        pos_proposals (Tensor): Positive proposals.\n        pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.\n        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap\n            or Polygon.\n        cfg (dict): Config dict that indicate the mask size.\n\n    Returns:\n        Tensor: Mask target of each positive proposals in the image.\n\n    Example:\n        >>> from mmengine.config import Config\n        >>> import mmdet\n        >>> from mmdet.data_elements.mask import BitmapMasks\n        >>> from mmdet.data_elements.mask.mask_target import *  # NOQA\n        >>> H, W = 32, 32\n        
>>> cfg = Config({'mask_size': (7, 11)})\n        >>> rng = np.random.RandomState(0)\n        >>> # Masks for each ground truth box (relative to the image)\n        >>> gt_masks_data = rng.rand(3, H, W)\n        >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)\n        >>> # Predicted positive boxes in one image\n        >>> pos_proposals = torch.FloatTensor([\n        >>>     [ 16.2,   5.5, 19.9, 20.9],\n        >>>     [ 17.3,  13.6, 19.3, 19.3],\n        >>>     [ 14.8,  16.4, 17.0, 23.7],\n        >>>     [  0.0,   0.0, 16.0, 16.0],\n        >>>     [  4.0,   0.0, 20.0, 16.0],\n        >>> ])\n        >>> # For each predicted proposal, its assignment to a gt mask\n        >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])\n        >>> mask_targets = mask_target_single(\n        >>>     pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)\n        >>> assert mask_targets.shape == (5,) + cfg['mask_size']\n    \"\"\"\n    device = pos_proposals.device\n    mask_size = _pair(cfg.mask_size)\n    binarize = not cfg.get('soft_mask_target', False)\n    num_pos = pos_proposals.size(0)\n    if num_pos > 0:\n        proposals_np = pos_proposals.cpu().numpy()\n        maxh, maxw = gt_masks.height, gt_masks.width\n        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)\n        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)\n        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()\n\n        mask_targets = gt_masks.crop_and_resize(\n            proposals_np,\n            mask_size,\n            device=device,\n            inds=pos_assigned_gt_inds,\n            binarize=binarize).to_ndarray()\n\n        mask_targets = torch.from_numpy(mask_targets).float().to(device)\n    else:\n        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)\n\n    return mask_targets\n"
  },
  {
    "path": "mmdet/structures/mask/structures.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Sequence, Type, TypeVar\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nimport torch\nfrom mmcv.ops.roi_align import roi_align\n\nT = TypeVar('T')\n\n\nclass BaseInstanceMasks(metaclass=ABCMeta):\n    \"\"\"Base class for instance masks.\"\"\"\n\n    @abstractmethod\n    def rescale(self, scale, interpolation='nearest'):\n        \"\"\"Rescale masks as large as possible while keeping the aspect ratio.\n        For details can refer to `mmcv.imrescale`.\n\n        Args:\n            scale (tuple[int]): The maximum size (h, w) of rescaled mask.\n            interpolation (str): Same as :func:`mmcv.imrescale`.\n\n        Returns:\n            BaseInstanceMasks: The rescaled masks.\n        \"\"\"\n\n    @abstractmethod\n    def resize(self, out_shape, interpolation='nearest'):\n        \"\"\"Resize masks to the given out_shape.\n\n        Args:\n            out_shape: Target (h, w) of resized mask.\n            interpolation (str): See :func:`mmcv.imresize`.\n\n        Returns:\n            BaseInstanceMasks: The resized masks.\n        \"\"\"\n\n    @abstractmethod\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"Flip masks alone the given direction.\n\n        Args:\n            flip_direction (str): Either 'horizontal' or 'vertical'.\n\n        Returns:\n            BaseInstanceMasks: The flipped masks.\n        \"\"\"\n\n    @abstractmethod\n    def pad(self, out_shape, pad_val):\n        \"\"\"Pad masks to the given size of (h, w).\n\n        Args:\n            out_shape (tuple[int]): Target (h, w) of padded mask.\n            pad_val (int): The padded value.\n\n        Returns:\n            BaseInstanceMasks: The padded masks.\n        \"\"\"\n\n    @abstractmethod\n    def crop(self, bbox):\n        \"\"\"Crop each mask by the given bbox.\n\n        Args:\n            bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).\n\n        Return:\n            BaseInstanceMasks: The cropped masks.\n        \"\"\"\n\n    @abstractmethod\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device,\n                        interpolation='bilinear',\n                        binarize=True):\n        \"\"\"Crop and resize masks by the given bboxes.\n\n        This function is mainly used in mask targets computation.\n        It firstly align mask to bboxes by assigned_inds, then crop mask by the\n        assigned bbox and resize to the size of (mask_h, mask_w)\n\n        Args:\n            bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)\n            out_shape (tuple[int]): Target (h, w) of resized mask\n            inds (ndarray): Indexes to assign masks to each bbox,\n                shape (N,) and values should be between [0, num_masks - 1].\n            device (str): Device of bboxes\n            interpolation (str): See `mmcv.imresize`\n            binarize (bool): if True fractional values are rounded to 0 or 1\n                after the resize operation. if False and unsupported an error\n                will be raised. 
Defaults to True.\n\n        Return:\n            BaseInstanceMasks: the cropped and resized masks.\n        \"\"\"\n\n    @abstractmethod\n    def expand(self, expanded_h, expanded_w, top, left):\n        \"\"\"see :class:`Expand`.\"\"\"\n\n    @property\n    @abstractmethod\n    def areas(self):\n        \"\"\"ndarray: areas of each instance.\"\"\"\n\n    @abstractmethod\n    def to_ndarray(self):\n        \"\"\"Convert masks to the format of ndarray.\n\n        Return:\n            ndarray: Converted masks in the format of ndarray.\n        \"\"\"\n\n    @abstractmethod\n    def to_tensor(self, dtype, device):\n        \"\"\"Convert masks to the format of Tensor.\n\n        Args:\n            dtype (str): Dtype of converted mask.\n            device (torch.device): Device of converted masks.\n\n        Returns:\n            Tensor: Converted masks in the format of Tensor.\n        \"\"\"\n\n    @abstractmethod\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  border_value=0,\n                  interpolation='bilinear'):\n        \"\"\"Translate the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            offset (int | float): The offset for translate.\n            direction (str): The translate direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | float): Border value. Default 0.\n            interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n        Returns:\n            Translated masks.\n        \"\"\"\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"Shear the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            magnitude (int | float): The magnitude used for shear.\n            direction (str): The shear direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | tuple[int]): Value used in case of a\n                constant border. Default 0.\n            interpolation (str): Same as in :func:`mmcv.imshear`.\n\n        Returns:\n            ndarray: Sheared masks.\n        \"\"\"\n\n    @abstractmethod\n    def rotate(self, out_shape, angle, center=None, scale=1.0, border_value=0):\n        \"\"\"Rotate the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            angle (int | float): Rotation angle in degrees. Positive values\n                mean counter-clockwise rotation.\n            center (tuple[float], optional): Center point (w, h) of the\n                rotation in source image. If not specified, the center of\n                the image will be used.\n            scale (int | float): Isotropic scale factor.\n            border_value (int | float): Border value. 
Default 0 for masks.\n\n        Returns:\n            Rotated masks.\n        \"\"\"\n\n    def get_bboxes(self, dst_type='hbb'):\n        \"\"\"Get the certain type boxes from masks.\n\n        Please refer to ``mmdet.structures.bbox.box_type`` for more details of\n        the box type.\n\n        Args:\n            dst_type: Destination box type.\n\n        Returns:\n            :obj:`BaseBoxes`: Certain type boxes.\n        \"\"\"\n        from ..bbox import get_box_type\n        _, box_type_cls = get_box_type(dst_type)\n        return box_type_cls.from_instance_masks(self)\n\n    @classmethod\n    @abstractmethod\n    def cat(cls: Type[T], masks: Sequence[T]) -> T:\n        \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n        Args:\n            masks (Sequence[T]): A sequence of mask instances.\n\n        Returns:\n            T: Concatenated mask instance.\n        \"\"\"\n\n\nclass BitmapMasks(BaseInstanceMasks):\n    \"\"\"This class represents masks in the form of bitmaps.\n\n    Args:\n        masks (ndarray): ndarray of masks in shape (N, H, W), where N is\n            the number of objects.\n        height (int): height of masks\n        width (int): width of masks\n\n    Example:\n        >>> from mmdet.data_elements.mask.structures import *  # NOQA\n        >>> num_masks, H, W = 3, 32, 32\n        >>> rng = np.random.RandomState(0)\n        >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int64)\n        >>> self = BitmapMasks(masks, height=H, width=W)\n\n        >>> # demo crop_and_resize\n        >>> num_boxes = 5\n        >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n        >>> out_shape = (14, 14)\n        >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n        >>> device = 'cpu'\n        >>> interpolation = 'bilinear'\n        >>> new = self.crop_and_resize(\n        ...     
bboxes, out_shape, inds, device, interpolation)\n        >>> assert len(new) == num_boxes\n        >>> assert new.height, new.width == out_shape\n    \"\"\"\n\n    def __init__(self, masks, height, width):\n        self.height = height\n        self.width = width\n        if len(masks) == 0:\n            self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)\n        else:\n            assert isinstance(masks, (list, np.ndarray))\n            if isinstance(masks, list):\n                assert isinstance(masks[0], np.ndarray)\n                assert masks[0].ndim == 2  # (H, W)\n            else:\n                assert masks.ndim == 3  # (N, H, W)\n\n            self.masks = np.stack(masks).reshape(-1, height, width)\n            assert self.masks.shape[1] == self.height\n            assert self.masks.shape[2] == self.width\n\n    def __getitem__(self, index):\n        \"\"\"Index the BitmapMask.\n\n        Args:\n            index (int | ndarray): Indices in the format of integer or ndarray.\n\n        Returns:\n            :obj:`BitmapMasks`: Indexed bitmap masks.\n        \"\"\"\n        masks = self.masks[index].reshape(-1, self.height, self.width)\n        return BitmapMasks(masks, self.height, self.width)\n\n    def __iter__(self):\n        return iter(self.masks)\n\n    def __repr__(self):\n        s = self.__class__.__name__ + '('\n        s += f'num_masks={len(self.masks)}, '\n        s += f'height={self.height}, '\n        s += f'width={self.width})'\n        return s\n\n    def __len__(self):\n        \"\"\"Number of masks.\"\"\"\n        return len(self.masks)\n\n    def rescale(self, scale, interpolation='nearest'):\n        \"\"\"See :func:`BaseInstanceMasks.rescale`.\"\"\"\n        if len(self.masks) == 0:\n            new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n            rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)\n        else:\n            rescaled_masks = np.stack([\n                mmcv.imrescale(mask, scale, interpolation=interpolation)\n                for mask in self.masks\n            ])\n        height, width = rescaled_masks.shape[1:]\n        return BitmapMasks(rescaled_masks, height, width)\n\n    def resize(self, out_shape, interpolation='nearest'):\n        \"\"\"See :func:`BaseInstanceMasks.resize`.\"\"\"\n        if len(self.masks) == 0:\n            resized_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            resized_masks = np.stack([\n                mmcv.imresize(\n                    mask, out_shape[::-1], interpolation=interpolation)\n                for mask in self.masks\n            ])\n        return BitmapMasks(resized_masks, *out_shape)\n\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"See :func:`BaseInstanceMasks.flip`.\"\"\"\n        assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n\n        if len(self.masks) == 0:\n            flipped_masks = self.masks\n        else:\n            flipped_masks = np.stack([\n                mmcv.imflip(mask, direction=flip_direction)\n                for mask in self.masks\n            ])\n        return BitmapMasks(flipped_masks, self.height, self.width)\n\n    def pad(self, out_shape, pad_val=0):\n        \"\"\"See :func:`BaseInstanceMasks.pad`.\"\"\"\n        if len(self.masks) == 0:\n            padded_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            padded_masks = np.stack([\n                mmcv.impad(mask, shape=out_shape, pad_val=pad_val)\n                for 
mask in self.masks\n            ])\n        return BitmapMasks(padded_masks, *out_shape)\n\n    def crop(self, bbox):\n        \"\"\"See :func:`BaseInstanceMasks.crop`.\"\"\"\n        assert isinstance(bbox, np.ndarray)\n        assert bbox.ndim == 1\n\n        # clip the boundary\n        bbox = bbox.copy()\n        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n        x1, y1, x2, y2 = bbox\n        w = np.maximum(x2 - x1, 1)\n        h = np.maximum(y2 - y1, 1)\n\n        if len(self.masks) == 0:\n            cropped_masks = np.empty((0, h, w), dtype=np.uint8)\n        else:\n            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]\n        return BitmapMasks(cropped_masks, h, w)\n\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device='cpu',\n                        interpolation='bilinear',\n                        binarize=True):\n        \"\"\"See :func:`BaseInstanceMasks.crop_and_resize`.\"\"\"\n        if len(self.masks) == 0:\n            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)\n            return BitmapMasks(empty_masks, *out_shape)\n\n        # convert bboxes to tensor\n        if isinstance(bboxes, np.ndarray):\n            bboxes = torch.from_numpy(bboxes).to(device=device)\n        if isinstance(inds, np.ndarray):\n            inds = torch.from_numpy(inds).to(device=device)\n\n        num_bbox = bboxes.shape[0]\n        fake_inds = torch.arange(\n            num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]\n        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5\n        rois = rois.to(device=device)\n        if num_bbox > 0:\n            gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(\n                0, inds).to(dtype=rois.dtype)\n            targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,\n                                1.0, 0, 'avg', True).squeeze(1)\n            if binarize:\n                resized_masks = (targets >= 0.5).cpu().numpy()\n            else:\n                resized_masks = targets.cpu().numpy()\n        else:\n            resized_masks = []\n        return BitmapMasks(resized_masks, *out_shape)\n\n    def expand(self, expanded_h, expanded_w, top, left):\n        \"\"\"See :func:`BaseInstanceMasks.expand`.\"\"\"\n        if len(self.masks) == 0:\n            expanded_mask = np.empty((0, expanded_h, expanded_w),\n                                     dtype=np.uint8)\n        else:\n            expanded_mask = np.zeros((len(self), expanded_h, expanded_w),\n                                     dtype=np.uint8)\n            expanded_mask[:, top:top + self.height,\n                          left:left + self.width] = self.masks\n        return BitmapMasks(expanded_mask, expanded_h, expanded_w)\n\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  border_value=0,\n                  interpolation='bilinear'):\n        \"\"\"Translate the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            offset (int | float): The offset for translate.\n            direction (str): The translate direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | float): Border value. 
Default 0 for masks.\n            interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n        Returns:\n            BitmapMasks: Translated BitmapMasks.\n\n        Example:\n            >>> from mmdet.data_elements.mask.structures import BitmapMasks\n            >>> self = BitmapMasks.random(dtype=np.uint8)\n            >>> out_shape = (32, 32)\n            >>> offset = 4\n            >>> direction = 'horizontal'\n            >>> border_value = 0\n            >>> interpolation = 'bilinear'\n            >>> # Note, There seem to be issues when:\n            >>> # * the mask dtype is not supported by cv2.AffineWarp\n            >>> new = self.translate(out_shape, offset, direction,\n            >>>                      border_value, interpolation)\n            >>> assert len(new) == len(self)\n            >>> assert new.height, new.width == out_shape\n        \"\"\"\n        if len(self.masks) == 0:\n            translated_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            masks = self.masks\n            if masks.shape[-2:] != out_shape:\n                empty_masks = np.zeros((masks.shape[0], *out_shape),\n                                       dtype=masks.dtype)\n                min_h = min(out_shape[0], masks.shape[1])\n                min_w = min(out_shape[1], masks.shape[2])\n                empty_masks[:, :min_h, :min_w] = masks[:, :min_h, :min_w]\n                masks = empty_masks\n            translated_masks = mmcv.imtranslate(\n                masks.transpose((1, 2, 0)),\n                offset,\n                direction,\n                border_value=border_value,\n                interpolation=interpolation)\n            if translated_masks.ndim == 2:\n                translated_masks = translated_masks[:, :, None]\n            translated_masks = translated_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(translated_masks, *out_shape)\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"Shear the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            magnitude (int | float): The magnitude used for shear.\n            direction (str): The shear direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | tuple[int]): Value used in case of a\n                constant border.\n            interpolation (str): Same as in :func:`mmcv.imshear`.\n\n        Returns:\n            BitmapMasks: The sheared masks.\n        \"\"\"\n        if len(self.masks) == 0:\n            sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            sheared_masks = mmcv.imshear(\n                self.masks.transpose((1, 2, 0)),\n                magnitude,\n                direction,\n                border_value=border_value,\n                interpolation=interpolation)\n            if sheared_masks.ndim == 2:\n                sheared_masks = sheared_masks[:, :, None]\n            sheared_masks = sheared_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(sheared_masks, *out_shape)\n\n    def rotate(self,\n               out_shape,\n               angle,\n               center=None,\n               scale=1.0,\n               border_value=0,\n               interpolation='bilinear'):\n        \"\"\"Rotate 
the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            angle (int | float): Rotation angle in degrees. Positive values\n                mean counter-clockwise rotation.\n            center (tuple[float], optional): Center point (w, h) of the\n                rotation in source image. If not specified, the center of\n                the image will be used.\n            scale (int | float): Isotropic scale factor.\n            border_value (int | float): Border value. Default 0 for masks.\n            interpolation (str): Same as in :func:`mmcv.imrotate`.\n\n        Returns:\n            BitmapMasks: Rotated BitmapMasks.\n        \"\"\"\n        if len(self.masks) == 0:\n            rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)\n        else:\n            rotated_masks = mmcv.imrotate(\n                self.masks.transpose((1, 2, 0)),\n                angle,\n                center=center,\n                scale=scale,\n                border_value=border_value,\n                interpolation=interpolation)\n            if rotated_masks.ndim == 2:\n                # case when only one mask, (h, w)\n                rotated_masks = rotated_masks[:, :, None]  # (h, w, 1)\n            rotated_masks = rotated_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(rotated_masks, *out_shape)\n\n    @property\n    def areas(self):\n        \"\"\"See :py:attr:`BaseInstanceMasks.areas`.\"\"\"\n        return self.masks.sum((1, 2))\n\n    def to_ndarray(self):\n        \"\"\"See :func:`BaseInstanceMasks.to_ndarray`.\"\"\"\n        return self.masks\n\n    def to_tensor(self, dtype, device):\n        \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n        return torch.tensor(self.masks, dtype=dtype, device=device)\n\n    @classmethod\n    def random(cls,\n               num_masks=3,\n               height=32,\n               width=32,\n               dtype=np.uint8,\n               rng=None):\n        \"\"\"Generate random bitmap masks for demo / testing purposes.\n\n        Example:\n            >>> from mmdet.data_elements.mask.structures import BitmapMasks\n            >>> self = BitmapMasks.random()\n            >>> print('self = {}'.format(self))\n            self = BitmapMasks(num_masks=3, height=32, width=32)\n        \"\"\"\n        from mmdet.utils.util_random import ensure_rng\n        rng = ensure_rng(rng)\n        masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)\n        self = cls(masks, height=height, width=width)\n        return self\n\n    @classmethod\n    def cat(cls: Type[T], masks: Sequence[T]) -> T:\n        \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n        Args:\n            masks (Sequence[BitmapMasks]): A sequence of mask instances.\n\n        Returns:\n            BitmapMasks: Concatenated mask instance.\n        \"\"\"\n        assert isinstance(masks, Sequence)\n        if len(masks) == 0:\n            raise ValueError('masks should not be an empty list.')\n        assert all(isinstance(m, cls) for m in masks)\n\n        mask_array = np.concatenate([m.masks for m in masks], axis=0)\n        return cls(mask_array, *mask_array.shape[1:])\n\n\nclass PolygonMasks(BaseInstanceMasks):\n    \"\"\"This class represents masks in the form of polygons.\n\n    Polygons is a list of three levels. 
The first level of the list\n    corresponds to objects, the second level to the polys that compose the\n    object, the third level to the poly coordinates\n\n    Args:\n        masks (list[list[ndarray]]): The first level of the list\n            corresponds to objects, the second level to the polys that\n            compose the object, the third level to the poly coordinates\n        height (int): height of masks\n        width (int): width of masks\n\n    Example:\n        >>> from mmdet.data_elements.mask.structures import *  # NOQA\n        >>> masks = [\n        >>>     [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]\n        >>> ]\n        >>> height, width = 16, 16\n        >>> self = PolygonMasks(masks, height, width)\n\n        >>> # demo translate\n        >>> new = self.translate((16, 16), 4., direction='horizontal')\n        >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])\n        >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)\n\n        >>> # demo crop_and_resize\n        >>> num_boxes = 3\n        >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n        >>> out_shape = (16, 16)\n        >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n        >>> device = 'cpu'\n        >>> interpolation = 'bilinear'\n        >>> new = self.crop_and_resize(\n        ...     bboxes, out_shape, inds, device, interpolation)\n        >>> assert len(new) == num_boxes\n        >>> assert new.height, new.width == out_shape\n    \"\"\"\n\n    def __init__(self, masks, height, width):\n        assert isinstance(masks, list)\n        if len(masks) > 0:\n            assert isinstance(masks[0], list)\n            assert isinstance(masks[0][0], np.ndarray)\n\n        self.height = height\n        self.width = width\n        self.masks = masks\n\n    def __getitem__(self, index):\n        \"\"\"Index the polygon masks.\n\n        Args:\n            index (ndarray | List): The indices.\n\n        Returns:\n            :obj:`PolygonMasks`: The indexed polygon masks.\n        \"\"\"\n        if isinstance(index, np.ndarray):\n            if index.dtype == bool:\n                index = np.where(index)[0].tolist()\n            else:\n                index = index.tolist()\n        if isinstance(index, list):\n            masks = [self.masks[i] for i in index]\n        else:\n            try:\n                masks = self.masks[index]\n            except Exception:\n                raise ValueError(\n                    f'Unsupported input of type {type(index)} for indexing!')\n        if len(masks) and isinstance(masks[0], np.ndarray):\n            masks = [masks]  # ensure a list of three levels\n        return PolygonMasks(masks, self.height, self.width)\n\n    def __iter__(self):\n        return iter(self.masks)\n\n    def __repr__(self):\n        s = self.__class__.__name__ + '('\n        s += f'num_masks={len(self.masks)}, '\n        s += f'height={self.height}, '\n        s += f'width={self.width})'\n        return s\n\n    def __len__(self):\n        \"\"\"Number of masks.\"\"\"\n        return len(self.masks)\n\n    def rescale(self, scale, interpolation=None):\n        \"\"\"see :func:`BaseInstanceMasks.rescale`\"\"\"\n        new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n        if len(self.masks) == 0:\n            rescaled_masks = PolygonMasks([], new_h, new_w)\n        else:\n            rescaled_masks = self.resize((new_h, new_w))\n        return rescaled_masks\n\n    def resize(self, out_shape, interpolation=None):\n      
  \"\"\"see :func:`BaseInstanceMasks.resize`\"\"\"\n        if len(self.masks) == 0:\n            resized_masks = PolygonMasks([], *out_shape)\n        else:\n            h_scale = out_shape[0] / self.height\n            w_scale = out_shape[1] / self.width\n            resized_masks = []\n            for poly_per_obj in self.masks:\n                resized_poly = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    p[0::2] = p[0::2] * w_scale\n                    p[1::2] = p[1::2] * h_scale\n                    resized_poly.append(p)\n                resized_masks.append(resized_poly)\n            resized_masks = PolygonMasks(resized_masks, *out_shape)\n        return resized_masks\n\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"see :func:`BaseInstanceMasks.flip`\"\"\"\n        assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n        if len(self.masks) == 0:\n            flipped_masks = PolygonMasks([], self.height, self.width)\n        else:\n            flipped_masks = []\n            for poly_per_obj in self.masks:\n                flipped_poly_per_obj = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    if flip_direction == 'horizontal':\n                        p[0::2] = self.width - p[0::2]\n                    elif flip_direction == 'vertical':\n                        p[1::2] = self.height - p[1::2]\n                    else:\n                        p[0::2] = self.width - p[0::2]\n                        p[1::2] = self.height - p[1::2]\n                    flipped_poly_per_obj.append(p)\n                flipped_masks.append(flipped_poly_per_obj)\n            flipped_masks = PolygonMasks(flipped_masks, self.height,\n                                         self.width)\n        return flipped_masks\n\n    def crop(self, bbox):\n        \"\"\"see :func:`BaseInstanceMasks.crop`\"\"\"\n        assert isinstance(bbox, np.ndarray)\n        assert bbox.ndim == 1\n\n        # clip the boundary\n        bbox = bbox.copy()\n        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n        x1, y1, x2, y2 = bbox\n        w = np.maximum(x2 - x1, 1)\n        h = np.maximum(y2 - y1, 1)\n\n        if len(self.masks) == 0:\n            cropped_masks = PolygonMasks([], h, w)\n        else:\n            cropped_masks = []\n            for poly_per_obj in self.masks:\n                cropped_poly_per_obj = []\n                for p in poly_per_obj:\n                    # pycocotools will clip the boundary\n                    p = p.copy()\n                    p[0::2] = p[0::2] - bbox[0]\n                    p[1::2] = p[1::2] - bbox[1]\n                    cropped_poly_per_obj.append(p)\n                cropped_masks.append(cropped_poly_per_obj)\n            cropped_masks = PolygonMasks(cropped_masks, h, w)\n        return cropped_masks\n\n    def pad(self, out_shape, pad_val=0):\n        \"\"\"padding has no effect on polygons`\"\"\"\n        return PolygonMasks(self.masks, *out_shape)\n\n    def expand(self, *args, **kwargs):\n        \"\"\"TODO: Add expand for polygon\"\"\"\n        raise NotImplementedError\n\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device='cpu',\n                        interpolation='bilinear',\n                        binarize=True):\n        \"\"\"see 
:func:`BaseInstanceMasks.crop_and_resize`\"\"\"\n        out_h, out_w = out_shape\n        if len(self.masks) == 0:\n            return PolygonMasks([], out_h, out_w)\n\n        if not binarize:\n            raise ValueError('Polygons are always binary, '\n                             'setting binarize=False is unsupported')\n\n        resized_masks = []\n        for i in range(len(bboxes)):\n            mask = self.masks[inds[i]]\n            bbox = bboxes[i, :]\n            x1, y1, x2, y2 = bbox\n            w = np.maximum(x2 - x1, 1)\n            h = np.maximum(y2 - y1, 1)\n            h_scale = out_h / max(h, 0.1)  # avoid too large scale\n            w_scale = out_w / max(w, 0.1)\n\n            resized_mask = []\n            for p in mask:\n                p = p.copy()\n                # crop\n                # pycocotools will clip the boundary\n                p[0::2] = p[0::2] - bbox[0]\n                p[1::2] = p[1::2] - bbox[1]\n\n                # resize\n                p[0::2] = p[0::2] * w_scale\n                p[1::2] = p[1::2] * h_scale\n                resized_mask.append(p)\n            resized_masks.append(resized_mask)\n        return PolygonMasks(resized_masks, *out_shape)\n\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  border_value=None,\n                  interpolation=None):\n        \"\"\"Translate the PolygonMasks.\n\n        Example:\n            >>> self = PolygonMasks.random(dtype=np.int64)\n            >>> out_shape = (self.height, self.width)\n            >>> new = self.translate(out_shape, 4., direction='horizontal')\n            >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])\n            >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4)  # noqa: E501\n        \"\"\"\n        assert border_value is None or border_value == 0, \\\n            'Here border_value is not '\\\n            f'used, and defaultly should be None or 0. 
got {border_value}.'\n        if len(self.masks) == 0:\n            translated_masks = PolygonMasks([], *out_shape)\n        else:\n            translated_masks = []\n            for poly_per_obj in self.masks:\n                translated_poly_per_obj = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    if direction == 'horizontal':\n                        p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])\n                    elif direction == 'vertical':\n                        p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])\n                    translated_poly_per_obj.append(p)\n                translated_masks.append(translated_poly_per_obj)\n            translated_masks = PolygonMasks(translated_masks, *out_shape)\n        return translated_masks\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"See :func:`BaseInstanceMasks.shear`.\"\"\"\n        if len(self.masks) == 0:\n            sheared_masks = PolygonMasks([], *out_shape)\n        else:\n            sheared_masks = []\n            if direction == 'horizontal':\n                shear_matrix = np.stack([[1, magnitude],\n                                         [0, 1]]).astype(np.float32)\n            elif direction == 'vertical':\n                shear_matrix = np.stack([[1, 0], [magnitude,\n                                                  1]]).astype(np.float32)\n            for poly_per_obj in self.masks:\n                sheared_poly = []\n                for p in poly_per_obj:\n                    p = np.stack([p[0::2], p[1::2]], axis=0)  # [2, n]\n                    new_coords = np.matmul(shear_matrix, p)  # [2, n]\n                    new_coords[0, :] = np.clip(new_coords[0, :], 0,\n                                               out_shape[1])\n                    new_coords[1, :] = np.clip(new_coords[1, :], 0,\n                                               out_shape[0])\n                    sheared_poly.append(\n                        new_coords.transpose((1, 0)).reshape(-1))\n                sheared_masks.append(sheared_poly)\n            sheared_masks = PolygonMasks(sheared_masks, *out_shape)\n        return sheared_masks\n\n    def rotate(self,\n               out_shape,\n               angle,\n               center=None,\n               scale=1.0,\n               border_value=0,\n               interpolation='bilinear'):\n        \"\"\"See :func:`BaseInstanceMasks.rotate`.\"\"\"\n        if len(self.masks) == 0:\n            rotated_masks = PolygonMasks([], *out_shape)\n        else:\n            rotated_masks = []\n            rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)\n            for poly_per_obj in self.masks:\n                rotated_poly = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    coords = np.stack([p[0::2], p[1::2]], axis=1)  # [n, 2]\n                    # pad 1 to convert from format [x, y] to homogeneous\n                    # coordinates format [x, y, 1]\n                    coords = np.concatenate(\n                        (coords, np.ones((coords.shape[0], 1), coords.dtype)),\n                        axis=1)  # [n, 3]\n                    rotated_coords = np.matmul(\n                        rotate_matrix[None, :, :],\n                        coords[:, :, None])[..., 0]  # [n, 2, 1] -> [n, 2]\n                    
rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,\n                                                   out_shape[1])\n                    rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,\n                                                   out_shape[0])\n                    rotated_poly.append(rotated_coords.reshape(-1))\n                rotated_masks.append(rotated_poly)\n            rotated_masks = PolygonMasks(rotated_masks, *out_shape)\n        return rotated_masks\n\n    def to_bitmap(self):\n        \"\"\"convert polygon masks to bitmap masks.\"\"\"\n        bitmap_masks = self.to_ndarray()\n        return BitmapMasks(bitmap_masks, self.height, self.width)\n\n    @property\n    def areas(self):\n        \"\"\"Compute areas of masks.\n\n        This func is modified from `detectron2\n        <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.\n        The function only works with Polygons using the shoelace formula.\n\n        Return:\n            ndarray: areas of each instance\n        \"\"\"  # noqa: W501\n        area = []\n        for polygons_per_obj in self.masks:\n            area_per_obj = 0\n            for p in polygons_per_obj:\n                area_per_obj += self._polygon_area(p[0::2], p[1::2])\n            area.append(area_per_obj)\n        return np.asarray(area)\n\n    def _polygon_area(self, x, y):\n        \"\"\"Compute the area of a component of a polygon.\n\n        Using the shoelace formula:\n        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n        Args:\n            x (ndarray): x coordinates of the component\n            y (ndarray): y coordinates of the component\n\n        Return:\n            float: the are of the component\n        \"\"\"  # noqa: 501\n        return 0.5 * np.abs(\n            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n    def to_ndarray(self):\n        \"\"\"Convert masks to the format of ndarray.\"\"\"\n        if len(self.masks) == 0:\n            return np.empty((0, self.height, self.width), dtype=np.uint8)\n        bitmap_masks = []\n        for poly_per_obj in self.masks:\n            bitmap_masks.append(\n                polygon_to_bitmap(poly_per_obj, self.height, self.width))\n        return np.stack(bitmap_masks)\n\n    def to_tensor(self, dtype, device):\n        \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n        if len(self.masks) == 0:\n            return torch.empty((0, self.height, self.width),\n                               dtype=dtype,\n                               device=device)\n        ndarray_masks = self.to_ndarray()\n        return torch.tensor(ndarray_masks, dtype=dtype, device=device)\n\n    @classmethod\n    def random(cls,\n               num_masks=3,\n               height=32,\n               width=32,\n               n_verts=5,\n               dtype=np.float32,\n               rng=None):\n        \"\"\"Generate random polygon masks for demo / testing purposes.\n\n        Adapted from [1]_\n\n        References:\n            .. 
[1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379  # noqa: E501\n\n        Example:\n            >>> from mmdet.data_elements.mask.structures import PolygonMasks\n            >>> self = PolygonMasks.random()\n            >>> print('self = {}'.format(self))\n        \"\"\"\n        from mmdet.utils.util_random import ensure_rng\n        rng = ensure_rng(rng)\n\n        def _gen_polygon(n, irregularity, spikeyness):\n            \"\"\"Creates the polygon by sampling points on a circle around the\n            centre.  Random noise is added by varying the angular spacing\n            between sequential points, and by varying the radial distance of\n            each point from the centre.\n\n            Based on original code by Mike Ounsworth\n\n            Args:\n                n (int): number of vertices\n                irregularity (float): [0,1] indicating how much variance there\n                    is in the angular spacing of vertices. [0,1] will map to\n                    [0, 2pi/numberOfVerts]\n                spikeyness (float): [0,1] indicating how much variance there is\n                    in each vertex from the circle of radius aveRadius. [0,1]\n                    will map to [0, aveRadius]\n\n            Returns:\n                a list of vertices, in CCW order.\n            \"\"\"\n            from scipy.stats import truncnorm\n\n            # Generate around the unit circle\n            cx, cy = (0.0, 0.0)\n            radius = 1\n\n            tau = np.pi * 2\n\n            irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n\n            spikeyness = np.clip(spikeyness, 1e-9, 1)\n\n            # generate n angle steps\n            lower = (tau / n) - irregularity\n            upper = (tau / n) + irregularity\n            angle_steps = rng.uniform(lower, upper, n)\n\n            # normalize the steps so that point 0 and point n+1 are the same\n            k = angle_steps.sum() / (2 * np.pi)\n            angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)\n\n            # Convert high and low values to be wrt the standard normal range\n            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html\n            low = 0\n            high = 2 * radius\n            mean = radius\n            std = spikeyness\n            a = (low - mean) / std\n            b = (high - mean) / std\n            tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)\n\n            # now generate the points\n            radii = tnorm.rvs(n, random_state=rng)\n            x_pts = cx + radii * np.cos(angles)\n            y_pts = cy + radii * np.sin(angles)\n\n            points = np.hstack([x_pts[:, None], y_pts[:, None]])\n\n            # Scale to 0-1 space\n            points = points - points.min(axis=0)\n            points = points / points.max(axis=0)\n\n            # Randomly place within 0-1 space\n            points = points * (rng.rand() * .8 + .2)\n            min_pt = points.min(axis=0)\n            max_pt = points.max(axis=0)\n\n            high = (1 - max_pt)\n            low = (0 - min_pt)\n            offset = (rng.rand(2) * (high - low)) + low\n            points = points + offset\n            return points\n\n        def _order_vertices(verts):\n            \"\"\"\n            References:\n                https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise\n            \"\"\"\n            mlat = verts.T[0].sum() / len(verts)\n       
     mlng = verts.T[1].sum() / len(verts)\n\n            tau = np.pi * 2\n            angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +\n                     tau) % tau\n            sortx = angle.argsort()\n            verts = verts.take(sortx, axis=0)\n            return verts\n\n        # Generate a random exterior for each requested mask\n        masks = []\n        for _ in range(num_masks):\n            exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))\n            exterior = (exterior * [(width, height)]).astype(dtype)\n            masks.append([exterior.ravel()])\n\n        self = cls(masks, height, width)\n        return self\n\n    @classmethod\n    def cat(cls: Type[T], masks: Sequence[T]) -> T:\n        \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n        Args:\n            masks (Sequence[PolygonMasks]): A sequence of mask instances.\n\n        Returns:\n            PolygonMasks: Concatenated mask instance.\n        \"\"\"\n        assert isinstance(masks, Sequence)\n        if len(masks) == 0:\n            raise ValueError('masks should not be an empty list.')\n        assert all(isinstance(m, cls) for m in masks)\n\n        mask_list = list(itertools.chain(*[m.masks for m in masks]))\n        return cls(mask_list, masks[0].height, masks[0].width)\n\n\ndef polygon_to_bitmap(polygons, height, width):\n    \"\"\"Convert masks from the form of polygons to bitmaps.\n\n    Args:\n        polygons (list[ndarray]): masks in polygon representation\n        height (int): mask height\n        width (int): mask width\n\n    Return:\n        ndarray: the converted masks in bitmap representation\n    \"\"\"\n    rles = maskUtils.frPyObjects(polygons, height, width)\n    rle = maskUtils.merge(rles)\n    bitmap_mask = maskUtils.decode(rle).astype(bool)\n    return bitmap_mask\n\n\ndef bitmap_to_polygon(bitmap):\n    \"\"\"Convert masks from the form of bitmaps to polygons.\n\n    Args:\n        bitmap (ndarray): masks in bitmap representation.\n\n    Return:\n        list[ndarray]: the converted mask in polygon representation.\n        bool: whether the mask has holes.\n    \"\"\"\n    bitmap = np.ascontiguousarray(bitmap).astype(np.uint8)\n    # cv2.RETR_CCOMP: retrieves all of the contours and organizes them\n    #   into a two-level hierarchy. At the top level, there are external\n    #   boundaries of the components. At the second level, there are\n    #   boundaries of the holes. If there is another contour inside a hole\n    #   of a connected component, it is still put at the top level.\n    # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points.\n    outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n    contours = outs[-2]\n    hierarchy = outs[-1]\n    if hierarchy is None:\n        return [], False\n    # hierarchy[i]: 4 elements, for the indexes of next, previous,\n    # parent, or nested contours. If there is no corresponding contour,\n    # it will be -1.\n    with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any()\n    contours = [c.reshape(-1, 2) for c in contours]\n    return contours, with_hole\n"
  },
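The `areas` property of `PolygonMasks` above relies on the shoelace formula implemented in `_polygon_area`. A minimal, self-contained sketch (NumPy only; the helper name below is illustrative) that reproduces the same computation and checks it against a rectangle of known area:

```python
import numpy as np


def shoelace_area(x, y):
    """Same computation as PolygonMasks._polygon_area."""
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))


# A 2x3 axis-aligned rectangle stored as a flat [x0, y0, x1, y1, ...] array,
# the layout PolygonMasks uses for each polygon component.
poly = np.array([0., 0., 2., 0., 2., 3., 0., 3.])
assert shoelace_area(poly[0::2], poly[1::2]) == 6.0
```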
  {
    "path": "mmdet/structures/mask/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport pycocotools.mask as mask_util\nimport torch\nfrom mmengine.utils import slice_list\n\n\ndef split_combined_polys(polys, poly_lens, polys_per_mask):\n    \"\"\"Split the combined 1-D polys into masks.\n\n    A mask is represented as a list of polys, and a poly is represented as\n    a 1-D array. In dataset, all masks are concatenated into a single 1-D\n    tensor. Here we need to split the tensor into original representations.\n\n    Args:\n        polys (list): a list (length = image num) of 1-D tensors\n        poly_lens (list): a list (length = image num) of poly length\n        polys_per_mask (list): a list (length = image num) of poly number\n            of each mask\n\n    Returns:\n        list: a list (length = image num) of list (length = mask num) of \\\n            list (length = poly num) of numpy array.\n    \"\"\"\n    mask_polys_list = []\n    for img_id in range(len(polys)):\n        polys_single = polys[img_id]\n        polys_lens_single = poly_lens[img_id].tolist()\n        polys_per_mask_single = polys_per_mask[img_id].tolist()\n\n        split_polys = slice_list(polys_single, polys_lens_single)\n        mask_polys = slice_list(split_polys, polys_per_mask_single)\n        mask_polys_list.append(mask_polys)\n    return mask_polys_list\n\n\n# TODO: move this function to more proper place\ndef encode_mask_results(mask_results):\n    \"\"\"Encode bitmap mask to RLE code.\n\n    Args:\n        mask_results (list): bitmap mask results.\n\n    Returns:\n        list | tuple: RLE encoded mask.\n    \"\"\"\n    encoded_mask_results = []\n    for mask in mask_results:\n        encoded_mask_results.append(\n            mask_util.encode(\n                np.array(mask[:, :, np.newaxis], order='F',\n                         dtype='uint8'))[0])  # encoded with RLE\n    return encoded_mask_results\n\n\ndef mask2bbox(masks):\n    \"\"\"Obtain tight bounding boxes of binary masks.\n\n    Args:\n        masks (Tensor): Binary mask of shape (n, h, w).\n\n    Returns:\n        Tensor: Bboxe with shape (n, 4) of \\\n            positive region in binary mask.\n    \"\"\"\n    N = masks.shape[0]\n    bboxes = masks.new_zeros((N, 4), dtype=torch.float32)\n    x_any = torch.any(masks, dim=1)\n    y_any = torch.any(masks, dim=2)\n    for i in range(N):\n        x = torch.where(x_any[i, :])[0]\n        y = torch.where(y_any[i, :])[0]\n        if len(x) > 0 and len(y) > 0:\n            bboxes[i, :] = bboxes.new_tensor(\n                [x[0], y[0], x[-1] + 1, y[-1] + 1])\n\n    return bboxes\n"
  },
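As a usage sketch for the utilities above (assuming `mmdet` and `torch` are importable), `mask2bbox` turns binary masks into tight `(x1, y1, x2, y2)` boxes with exclusive right/bottom edges:

```python
import torch

from mmdet.structures.mask.utils import mask2bbox

# Two 8x8 binary masks: one 3x3 blob and one intentionally empty mask.
masks = torch.zeros((2, 8, 8), dtype=torch.bool)
masks[0, 2:5, 1:4] = True

bboxes = mask2bbox(masks)
# Expected: the blob maps to [1., 2., 4., 5.]; the empty mask stays all-zero.
print(bboxes)
```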
  {
    "path": "mmdet/testing/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ._fast_stop_training_hook import FastStopTrainingHook  # noqa: F401,F403\nfrom ._utils import (demo_mm_inputs, demo_mm_proposals,\n                     demo_mm_sampling_results, get_detector_cfg,\n                     get_roi_head_cfg, replace_to_ceph)\n\n__all__ = [\n    'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',\n    'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph'\n]\n"
  },
  {
    "path": "mmdet/testing/_fast_stop_training_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.hooks import Hook\n\nfrom mmdet.registry import HOOKS\n\n\n@HOOKS.register_module()\nclass FastStopTrainingHook(Hook):\n    \"\"\"Set runner's epoch information to the model.\"\"\"\n\n    def __init__(self, by_epoch, save_ckpt=False, stop_iter_or_epoch=5):\n        self.by_epoch = by_epoch\n        self.save_ckpt = save_ckpt\n        self.stop_iter_or_epoch = stop_iter_or_epoch\n\n    def after_train_iter(self, runner, batch_idx: int, data_batch: None,\n                         outputs: None) -> None:\n        if self.save_ckpt and self.by_epoch:\n            # If it is epoch-based and want to save weights,\n            # we must run at least 1 epoch.\n            return\n        if runner.iter >= self.stop_iter_or_epoch:\n            raise RuntimeError('quick exit')\n\n    def after_train_epoch(self, runner) -> None:\n        if runner.epoch >= self.stop_iter_or_epoch - 1:\n            raise RuntimeError('quick exit')\n"
  },
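`FastStopTrainingHook` is registered in `HOOKS` when `mmdet.testing` is imported, so a test can cut a training run short by appending it to a config's `custom_hooks`. A sketch of such a fragment (values are illustrative, not taken from an existing test config):

```python
import mmdet.testing  # noqa: F401  # importing registers FastStopTrainingHook

# With this fragment merged into a training config, the runner raises
# RuntimeError('quick exit') once 5 iterations have run; the calling test
# only needs to catch that error instead of finishing the full schedule.
custom_hooks = [
    dict(type='FastStopTrainingHook', by_epoch=False, stop_iter_or_epoch=5),
]
```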
  {
    "path": "mmdet/testing/_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom os.path import dirname, exists, join\n\nimport numpy as np\nimport torch\nfrom mmengine.config import Config\nfrom mmengine.dataset import pseudo_collate\nfrom mmengine.structures import InstanceData, PixelData\n\nfrom ..registry import TASK_UTILS\nfrom ..structures import DetDataSample\nfrom ..structures.bbox import HorizontalBoxes\n\n\ndef _get_config_directory():\n    \"\"\"Find the predefined detector config directory.\"\"\"\n    try:\n        # Assume we are running in the source mmdetection repo\n        repo_dpath = dirname(dirname(dirname(__file__)))\n    except NameError:\n        # For IPython development when this __file__ is not defined\n        import mmdet\n        repo_dpath = dirname(dirname(mmdet.__file__))\n    config_dpath = join(repo_dpath, 'configs')\n    if not exists(config_dpath):\n        raise Exception('Cannot find config path')\n    return config_dpath\n\n\ndef _get_config_module(fname):\n    \"\"\"Load a configuration as a python module.\"\"\"\n    config_dpath = _get_config_directory()\n    config_fpath = join(config_dpath, fname)\n    config_mod = Config.fromfile(config_fpath)\n    return config_mod\n\n\ndef get_detector_cfg(fname):\n    \"\"\"Grab configs necessary to create a detector.\n\n    These are deep copied to allow for safe modification of parameters without\n    influencing other tests.\n    \"\"\"\n    config = _get_config_module(fname)\n    model = copy.deepcopy(config.model)\n    return model\n\n\ndef get_roi_head_cfg(fname):\n    \"\"\"Grab configs necessary to create a roi_head.\n\n    These are deep copied to allow for safe modification of parameters without\n    influencing other tests.\n    \"\"\"\n    config = _get_config_module(fname)\n    model = copy.deepcopy(config.model)\n\n    roi_head = model.roi_head\n    train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn\n    test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn\n    roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))\n    return roi_head\n\n\ndef _rand_bboxes(rng, num_boxes, w, h):\n    cx, cy, bw, bh = rng.rand(num_boxes, 4).T\n\n    tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)\n    tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)\n    br_x = ((cx * w) + (w * bw / 2)).clip(0, w)\n    br_y = ((cy * h) + (h * bh / 2)).clip(0, h)\n\n    bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T\n    return bboxes\n\n\ndef _rand_masks(rng, num_boxes, bboxes, img_w, img_h):\n    from mmdet.structures.mask import BitmapMasks\n    masks = np.zeros((num_boxes, img_h, img_w))\n    for i, bbox in enumerate(bboxes):\n        bbox = bbox.astype(np.int32)\n        mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >\n                0.3).astype(np.int64)\n        masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask\n    return BitmapMasks(masks, height=img_h, width=img_w)\n\n\ndef demo_mm_inputs(batch_size=2,\n                   image_shapes=(3, 128, 128),\n                   num_items=None,\n                   num_classes=10,\n                   sem_seg_output_strides=1,\n                   with_mask=False,\n                   with_semantic=False,\n                   use_box_type=False,\n                   device='cpu'):\n    \"\"\"Create a superset of inputs needed to run test or train batches.\n\n    Args:\n        batch_size (int): batch size. 
Defaults to 2.\n        image_shapes (List[tuple], Optional): image shape.\n            Defaults to (3, 128, 128)\n        num_items (None | List[int]): specifies the number\n            of boxes in each batch item. Default to None.\n        num_classes (int): number of different labels a\n            box might have. Defaults to 10.\n        with_mask (bool): Whether to return mask annotation.\n            Defaults to False.\n        with_semantic (bool): whether to return semantic.\n            Defaults to False.\n        device (str): Destination device type. Defaults to cpu.\n    \"\"\"\n    rng = np.random.RandomState(0)\n\n    if isinstance(image_shapes, list):\n        assert len(image_shapes) == batch_size\n    else:\n        image_shapes = [image_shapes] * batch_size\n\n    if isinstance(num_items, list):\n        assert len(num_items) == batch_size\n\n    packed_inputs = []\n    for idx in range(batch_size):\n        image_shape = image_shapes[idx]\n        c, h, w = image_shape\n\n        image = rng.randint(0, 255, size=image_shape, dtype=np.uint8)\n\n        mm_inputs = dict()\n        mm_inputs['inputs'] = torch.from_numpy(image).to(device)\n\n        img_meta = {\n            'img_id': idx,\n            'img_shape': image_shape[1:],\n            'ori_shape': image_shape[1:],\n            'filename': '<demo>.png',\n            'scale_factor': np.array([1.1, 1.2]),\n            'flip': False,\n            'flip_direction': None,\n            'border': [1, 1, 1, 1]  # Only used by CenterNet\n        }\n\n        data_sample = DetDataSample()\n        data_sample.set_metainfo(img_meta)\n\n        # gt_instances\n        gt_instances = InstanceData()\n        if num_items is None:\n            num_boxes = rng.randint(1, 10)\n        else:\n            num_boxes = num_items[idx]\n\n        bboxes = _rand_bboxes(rng, num_boxes, w, h)\n        labels = rng.randint(1, num_classes, size=num_boxes)\n        # TODO: remove this part when all model adapted with BaseBoxes\n        if use_box_type:\n            gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)\n        else:\n            gt_instances.bboxes = torch.FloatTensor(bboxes)\n        gt_instances.labels = torch.LongTensor(labels)\n\n        if with_mask:\n            masks = _rand_masks(rng, num_boxes, bboxes, w, h)\n            gt_instances.masks = masks\n\n        # TODO: waiting for ci to be fixed\n        # masks = np.random.randint(0, 2, (len(bboxes), h, w), dtype=np.uint8)\n        # gt_instances.mask = BitmapMasks(masks, h, w)\n\n        data_sample.gt_instances = gt_instances\n\n        # ignore_instances\n        ignore_instances = InstanceData()\n        bboxes = _rand_bboxes(rng, num_boxes, w, h)\n        if use_box_type:\n            ignore_instances.bboxes = HorizontalBoxes(\n                bboxes, dtype=torch.float32)\n        else:\n            ignore_instances.bboxes = torch.FloatTensor(bboxes)\n        data_sample.ignored_instances = ignore_instances\n\n        # gt_sem_seg\n        if with_semantic:\n            # assume gt_semantic_seg using scale 1/8 of the img\n            gt_semantic_seg = torch.from_numpy(\n                np.random.randint(\n                    0,\n                    num_classes, (1, h // sem_seg_output_strides,\n                                  w // sem_seg_output_strides),\n                    dtype=np.uint8))\n            gt_sem_seg_data = dict(sem_seg=gt_semantic_seg)\n            data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)\n\n        
mm_inputs['data_samples'] = data_sample.to(device)\n\n        # TODO: gt_ignore\n\n        packed_inputs.append(mm_inputs)\n    data = pseudo_collate(packed_inputs)\n    return data\n\n\ndef demo_mm_proposals(image_shapes, num_proposals, device='cpu'):\n    \"\"\"Create a list of fake porposals.\n\n    Args:\n        image_shapes (list[tuple[int]]): Batch image shapes.\n        num_proposals (int): The number of fake proposals.\n    \"\"\"\n    rng = np.random.RandomState(0)\n\n    results = []\n    for img_shape in image_shapes:\n        result = InstanceData()\n        w, h = img_shape[1:]\n        proposals = _rand_bboxes(rng, num_proposals, w, h)\n        result.bboxes = torch.from_numpy(proposals).float()\n        result.scores = torch.from_numpy(rng.rand(num_proposals)).float()\n        result.labels = torch.zeros(num_proposals).long()\n        results.append(result.to(device))\n    return results\n\n\ndef demo_mm_sampling_results(proposals_list,\n                             batch_gt_instances,\n                             batch_gt_instances_ignore=None,\n                             assigner_cfg=None,\n                             sampler_cfg=None,\n                             feats=None):\n    \"\"\"Create sample results that can be passed to BBoxHead.get_targets.\"\"\"\n    assert len(proposals_list) == len(batch_gt_instances)\n    if batch_gt_instances_ignore is None:\n        batch_gt_instances_ignore = [None for _ in batch_gt_instances]\n    else:\n        assert len(batch_gt_instances_ignore) == len(batch_gt_instances)\n\n    default_assigner_cfg = dict(\n        type='MaxIoUAssigner',\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n        min_pos_iou=0.5,\n        ignore_iof_thr=-1)\n    assigner_cfg = assigner_cfg if assigner_cfg is not None \\\n        else default_assigner_cfg\n    default_sampler_cfg = dict(\n        type='RandomSampler',\n        num=512,\n        pos_fraction=0.25,\n        neg_pos_ub=-1,\n        add_gt_as_proposals=True)\n    sampler_cfg = sampler_cfg if sampler_cfg is not None \\\n        else default_sampler_cfg\n    bbox_assigner = TASK_UTILS.build(assigner_cfg)\n    bbox_sampler = TASK_UTILS.build(sampler_cfg)\n\n    sampling_results = []\n    for i in range(len(batch_gt_instances)):\n        if feats is not None:\n            feats = [lvl_feat[i][None] for lvl_feat in feats]\n        # rename proposals.bboxes to proposals.priors\n        proposals = proposals_list[i]\n        proposals.priors = proposals.pop('bboxes')\n\n        assign_result = bbox_assigner.assign(proposals, batch_gt_instances[i],\n                                             batch_gt_instances_ignore[i])\n        sampling_result = bbox_sampler.sample(\n            assign_result, proposals, batch_gt_instances[i], feats=feats)\n        sampling_results.append(sampling_result)\n\n    return sampling_results\n\n\n# TODO: Support full ceph\ndef replace_to_ceph(cfg):\n    file_client_args = dict(\n        backend='petrel',\n        path_mapping=dict({\n            './data/': 's3://openmmlab/datasets/detection/',\n            'data/': 's3://openmmlab/datasets/detection/'\n        }))\n\n    # TODO: name is a reserved interface, which will be used later.\n    def _process_pipeline(dataset, name):\n\n        def replace_img(pipeline):\n            if pipeline['type'] == 'LoadImageFromFile':\n                pipeline['file_client_args'] = file_client_args\n\n        def replace_ann(pipeline):\n            if pipeline['type'] == 'LoadAnnotations' or pipeline[\n                    
'type'] == 'LoadPanopticAnnotations':\n                pipeline['file_client_args'] = file_client_args\n\n        if 'pipeline' in dataset:\n            replace_img(dataset.pipeline[0])\n            replace_ann(dataset.pipeline[1])\n            if 'dataset' in dataset:\n                # dataset wrapper\n                replace_img(dataset.dataset.pipeline[0])\n                replace_ann(dataset.dataset.pipeline[1])\n        else:\n            # dataset wrapper\n            replace_img(dataset.dataset.pipeline[0])\n            replace_ann(dataset.dataset.pipeline[1])\n\n    def _process_evaluator(evaluator, name):\n        if evaluator['type'] == 'CocoPanopticMetric':\n            evaluator['file_client_args'] = file_client_args\n\n    # half ceph\n    _process_pipeline(cfg.train_dataloader.dataset, cfg.filename)\n    _process_pipeline(cfg.val_dataloader.dataset, cfg.filename)\n    _process_pipeline(cfg.test_dataloader.dataset, cfg.filename)\n    _process_evaluator(cfg.val_evaluator, cfg.filename)\n    _process_evaluator(cfg.test_evaluator, cfg.filename)\n"
  },
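A short sketch of how these test helpers are typically combined (assuming `mmdet` is installed): `demo_mm_inputs` yields a collated fake batch and `demo_mm_proposals` fake RPN outputs for the same image shapes.

```python
from mmdet.testing import demo_mm_inputs, demo_mm_proposals

# A fake two-image batch with ground-truth boxes, labels and bitmap masks,
# packed the same way pseudo_collate would pack a real dataloader batch.
data = demo_mm_inputs(
    batch_size=2, image_shapes=(3, 128, 128), with_mask=True)
assert len(data['data_samples']) == 2

# Fake proposals, e.g. to exercise a RoI head without running an RPN.
proposals = demo_mm_proposals(
    image_shapes=[(3, 128, 128)] * 2, num_proposals=100)
assert proposals[0].bboxes.shape == (100, 4)
```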
  {
    "path": "mmdet/utils/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .collect_env import collect_env\nfrom .compat_config import compat_cfg\nfrom .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,\n                         sync_random_seed)\nfrom .logger import get_caller_name, log_img_scale\nfrom .memory import AvoidCUDAOOM, AvoidOOM\nfrom .misc import (find_latest_checkpoint, get_test_pipeline_cfg,\n                   update_data_root)\nfrom .replace_cfg_vals import replace_cfg_vals\nfrom .setup_env import register_all_modules, setup_multi_processes\nfrom .split_batch import split_batch\nfrom .typing_utils import (ConfigType, InstanceList, MultiConfig,\n                           OptConfigType, OptInstanceList, OptMultiConfig,\n                           OptPixelList, PixelList, RangeType)\n\n__all__ = [\n    'collect_env', 'find_latest_checkpoint', 'update_data_root',\n    'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',\n    'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',\n    'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',\n    'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',\n    'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',\n    'PixelList', 'RangeType', 'get_test_pipeline_cfg'\n]\n"
  },
  {
    "path": "mmdet/utils/benchmark.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport time\nfrom functools import partial\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import fuse_conv_bn\n# TODO need update\n# from mmcv.runner import wrap_fp16_model\nfrom mmengine import MMLogger\nfrom mmengine.config import Config\nfrom mmengine.device import get_max_cuda_memory\nfrom mmengine.dist import get_world_size\nfrom mmengine.runner import Runner, load_checkpoint\nfrom mmengine.utils.dl_utils import set_multi_processing\nfrom torch.nn.parallel import DistributedDataParallel\n\nfrom mmdet.registry import DATASETS, MODELS\n\ntry:\n    import psutil\nexcept ImportError:\n    psutil = None\n\n\ndef custom_round(value: Union[int, float],\n                 factor: Union[int, float],\n                 precision: int = 2) -> float:\n    \"\"\"Custom round function.\"\"\"\n    return round(value / factor, precision)\n\n\ngb_round = partial(custom_round, factor=1024**3)\n\n\ndef print_log(msg: str, logger: Optional[MMLogger] = None) -> None:\n    \"\"\"Print a log message.\"\"\"\n    if logger is None:\n        print(msg, flush=True)\n    else:\n        logger.info(msg)\n\n\ndef print_process_memory(p: psutil.Process,\n                         logger: Optional[MMLogger] = None) -> None:\n    \"\"\"print process memory info.\"\"\"\n    mem_used = gb_round(psutil.virtual_memory().used)\n    memory_full_info = p.memory_full_info()\n    uss_mem = gb_round(memory_full_info.uss)\n    pss_mem = gb_round(memory_full_info.pss)\n    for children in p.children():\n        child_mem_info = children.memory_full_info()\n        uss_mem += gb_round(child_mem_info.uss)\n        pss_mem += gb_round(child_mem_info.pss)\n    process_count = 1 + len(p.children())\n    print_log(\n        f'(GB) mem_used: {mem_used:.2f} | uss: {uss_mem:.2f} | '\n        f'pss: {pss_mem:.2f} | total_proc: {process_count}', logger)\n\n\nclass BaseBenchmark:\n    \"\"\"The benchmark base class.\n\n    The ``run`` method is an external calling interface, and it will\n    call the ``run_once`` method ``repeat_num`` times for benchmarking.\n    Finally, call the ``average_multiple_runs`` method to further process\n    the results of multiple runs.\n\n    Args:\n        max_iter (int): maximum iterations of benchmark.\n        log_interval (int): interval of logging.\n        num_warmup (int): Number of Warmup.\n        logger (MMLogger, optional): Formatted logger used to record messages.\n    \"\"\"\n\n    def __init__(self,\n                 max_iter: int,\n                 log_interval: int,\n                 num_warmup: int,\n                 logger: Optional[MMLogger] = None):\n        self.max_iter = max_iter\n        self.log_interval = log_interval\n        self.num_warmup = num_warmup\n        self.logger = logger\n\n    def run(self, repeat_num: int = 1) -> dict:\n        \"\"\"benchmark entry method.\n\n        Args:\n            repeat_num (int): Number of repeat benchmark.\n                Defaults to 1.\n        \"\"\"\n        assert repeat_num >= 1\n\n        results = []\n        for _ in range(repeat_num):\n            results.append(self.run_once())\n\n        results = self.average_multiple_runs(results)\n        return results\n\n    def run_once(self) -> dict:\n        \"\"\"Executes the benchmark once.\"\"\"\n        raise NotImplementedError()\n\n    def average_multiple_runs(self, results: List[dict]) -> dict:\n        \"\"\"Average the results of multiple 
runs.\"\"\"\n        raise NotImplementedError()\n\n\nclass InferenceBenchmark(BaseBenchmark):\n    \"\"\"The inference benchmark class. It will be statistical inference FPS,\n    CUDA memory and CPU memory information.\n\n    Args:\n        cfg (mmengine.Config): config.\n        checkpoint (str): Accept local filepath, URL, ``torchvision://xxx``,\n            ``open-mmlab://xxx``.\n        distributed (bool): distributed testing flag.\n        is_fuse_conv_bn (bool): Whether to fuse conv and bn, this will\n            slightly increase the inference speed.\n        max_iter (int): maximum iterations of benchmark. Defaults to 2000.\n        log_interval (int): interval of logging. Defaults to 50.\n        num_warmup (int): Number of Warmup. Defaults to 5.\n        logger (MMLogger, optional): Formatted logger used to record messages.\n    \"\"\"\n\n    def __init__(self,\n                 cfg: Config,\n                 checkpoint: str,\n                 distributed: bool,\n                 is_fuse_conv_bn: bool,\n                 max_iter: int = 2000,\n                 log_interval: int = 50,\n                 num_warmup: int = 5,\n                 logger: Optional[MMLogger] = None):\n        super().__init__(max_iter, log_interval, num_warmup, logger)\n\n        assert get_world_size(\n        ) == 1, 'Inference benchmark does not allow distributed multi-GPU'\n\n        self.cfg = copy.deepcopy(cfg)\n        self.distributed = distributed\n\n        if psutil is None:\n            raise ImportError('psutil is not installed, please install it by: '\n                              'pip install psutil')\n\n        self._process = psutil.Process()\n        env_cfg = self.cfg.get('env_cfg')\n        if env_cfg.get('cudnn_benchmark'):\n            torch.backends.cudnn.benchmark = True\n\n        mp_cfg: dict = env_cfg.get('mp_cfg', {})\n        set_multi_processing(**mp_cfg, distributed=self.distributed)\n\n        print_log('before build: ', self.logger)\n        print_process_memory(self._process, self.logger)\n\n        self.cfg.model.pretrained = None\n        self.model = self._init_model(checkpoint, is_fuse_conv_bn)\n\n        # Because multiple processes will occupy additional CPU resources,\n        # FPS statistics will be more unstable when num_workers is not 0.\n        # It is reasonable to set num_workers to 0.\n        dataloader_cfg = cfg.test_dataloader\n        dataloader_cfg['num_workers'] = 0\n        dataloader_cfg['batch_size'] = 1\n        dataloader_cfg['persistent_workers'] = False\n        self.data_loader = Runner.build_dataloader(dataloader_cfg)\n\n        print_log('after build: ', self.logger)\n        print_process_memory(self._process, self.logger)\n\n    def _init_model(self, checkpoint: str, is_fuse_conv_bn: bool) -> nn.Module:\n        \"\"\"Initialize the model.\"\"\"\n        model = MODELS.build(self.cfg.model)\n        # TODO need update\n        # fp16_cfg = self.cfg.get('fp16', None)\n        # if fp16_cfg is not None:\n        #     wrap_fp16_model(model)\n\n        load_checkpoint(model, checkpoint, map_location='cpu')\n        if is_fuse_conv_bn:\n            model = fuse_conv_bn(model)\n\n        model = model.cuda()\n\n        if self.distributed:\n            model = DistributedDataParallel(\n                model,\n                device_ids=[torch.cuda.current_device()],\n                broadcast_buffers=False,\n                find_unused_parameters=False)\n\n        model.eval()\n        return model\n\n    def run_once(self) -> dict:\n        
\"\"\"Executes the benchmark once.\"\"\"\n        pure_inf_time = 0\n        fps = 0\n\n        for i, data in enumerate(self.data_loader):\n\n            if (i + 1) % self.log_interval == 0:\n                print_log('==================================', self.logger)\n\n            torch.cuda.synchronize()\n            start_time = time.perf_counter()\n\n            with torch.no_grad():\n                self.model(data, return_loss=False)\n\n            torch.cuda.synchronize()\n            elapsed = time.perf_counter() - start_time\n\n            if i >= self.num_warmup:\n                pure_inf_time += elapsed\n                if (i + 1) % self.log_interval == 0:\n                    fps = (i + 1 - self.num_warmup) / pure_inf_time\n                    cuda_memory = get_max_cuda_memory()\n\n                    print_log(\n                        f'Done image [{i + 1:<3}/{self.max_iter}], '\n                        f'fps: {fps:.1f} img/s, '\n                        f'times per image: {1000 / fps:.1f} ms/img, '\n                        f'cuda memory: {cuda_memory} MB', self.logger)\n                    print_process_memory(self._process, self.logger)\n\n            if (i + 1) == self.max_iter:\n                fps = (i + 1 - self.num_warmup) / pure_inf_time\n                break\n\n        return {'fps': fps}\n\n    def average_multiple_runs(self, results: List[dict]) -> dict:\n        \"\"\"Average the results of multiple runs.\"\"\"\n        print_log('============== Done ==================', self.logger)\n\n        fps_list_ = [round(result['fps'], 1) for result in results]\n        avg_fps_ = sum(fps_list_) / len(fps_list_)\n        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}\n\n        if len(fps_list_) > 1:\n            times_pre_image_list_ = [\n                round(1000 / result['fps'], 1) for result in results\n            ]\n            avg_times_pre_image_ = sum(times_pre_image_list_) / len(\n                times_pre_image_list_)\n\n            print_log(\n                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '\n                'times per image: '\n                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '\n                'ms/img', self.logger)\n        else:\n            print_log(\n                f'Overall fps: {fps_list_[0]:.1f} img/s, '\n                f'times per image: {1000 / fps_list_[0]:.1f} ms/img',\n                self.logger)\n\n        print_log(f'cuda memory: {get_max_cuda_memory()} MB', self.logger)\n        print_process_memory(self._process, self.logger)\n\n        return outputs\n\n\nclass DataLoaderBenchmark(BaseBenchmark):\n    \"\"\"The dataloader benchmark class. It will be statistical inference FPS and\n    CPU memory information.\n\n    Args:\n        cfg (mmengine.Config): config.\n        distributed (bool): distributed testing flag.\n        dataset_type (str): benchmark data type, only supports ``train``,\n            ``val`` and ``test``.\n        max_iter (int): maximum iterations of benchmark. Defaults to 2000.\n        log_interval (int): interval of logging. Defaults to 50.\n        num_warmup (int): Number of Warmup. 
Defaults to 5.\n        logger (MMLogger, optional): Formatted logger used to record messages.\n    \"\"\"\n\n    def __init__(self,\n                 cfg: Config,\n                 distributed: bool,\n                 dataset_type: str,\n                 max_iter: int = 2000,\n                 log_interval: int = 50,\n                 num_warmup: int = 5,\n                 logger: Optional[MMLogger] = None):\n        super().__init__(max_iter, log_interval, num_warmup, logger)\n\n        assert dataset_type in ['train', 'val', 'test'], \\\n            'dataset_type only supports train,' \\\n            f' val and test, but got {dataset_type}'\n        assert get_world_size(\n        ) == 1, 'Dataloader benchmark does not allow distributed multi-GPU'\n\n        self.cfg = copy.deepcopy(cfg)\n        self.distributed = distributed\n\n        if psutil is None:\n            raise ImportError('psutil is not installed, please install it by: '\n                              'pip install psutil')\n        self._process = psutil.Process()\n\n        mp_cfg = self.cfg.get('env_cfg', {}).get('mp_cfg')\n        if mp_cfg is not None:\n            set_multi_processing(distributed=self.distributed, **mp_cfg)\n        else:\n            set_multi_processing(distributed=self.distributed)\n\n        print_log('before build: ', self.logger)\n        print_process_memory(self._process, self.logger)\n\n        if dataset_type == 'train':\n            self.data_loader = Runner.build_dataloader(cfg.train_dataloader)\n        elif dataset_type == 'test':\n            self.data_loader = Runner.build_dataloader(cfg.test_dataloader)\n        else:\n            self.data_loader = Runner.build_dataloader(cfg.val_dataloader)\n\n        self.batch_size = self.data_loader.batch_size\n        self.num_workers = self.data_loader.num_workers\n\n        print_log('after build: ', self.logger)\n        print_process_memory(self._process, self.logger)\n\n    def run_once(self) -> dict:\n        \"\"\"Executes the benchmark once.\"\"\"\n        pure_inf_time = 0\n        fps = 0\n\n        # benchmark with 2000 image and take the average\n        start_time = time.perf_counter()\n        for i, data in enumerate(self.data_loader):\n            elapsed = time.perf_counter() - start_time\n\n            if (i + 1) % self.log_interval == 0:\n                print_log('==================================', self.logger)\n\n            if i >= self.num_warmup:\n                pure_inf_time += elapsed\n                if (i + 1) % self.log_interval == 0:\n                    fps = (i + 1 - self.num_warmup) / pure_inf_time\n\n                    print_log(\n                        f'Done batch [{i + 1:<3}/{self.max_iter}], '\n                        f'fps: {fps:.1f} batch/s, '\n                        f'times per batch: {1000 / fps:.1f} ms/batch, '\n                        f'batch size: {self.batch_size}, num_workers: '\n                        f'{self.num_workers}', self.logger)\n                    print_process_memory(self._process, self.logger)\n\n            if (i + 1) == self.max_iter:\n                fps = (i + 1 - self.num_warmup) / pure_inf_time\n                break\n\n            start_time = time.perf_counter()\n\n        return {'fps': fps}\n\n    def average_multiple_runs(self, results: List[dict]) -> dict:\n        \"\"\"Average the results of multiple runs.\"\"\"\n        print_log('============== Done ==================', self.logger)\n\n        fps_list_ = [round(result['fps'], 1) for result in results]\n        
avg_fps_ = sum(fps_list_) / len(fps_list_)\n        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}\n\n        if len(fps_list_) > 1:\n            times_pre_image_list_ = [\n                round(1000 / result['fps'], 1) for result in results\n            ]\n            avg_times_pre_image_ = sum(times_pre_image_list_) / len(\n                times_pre_image_list_)\n\n            print_log(\n                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '\n                'times per batch: '\n                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '\n                f'ms/batch, batch size: {self.batch_size}, num_workers: '\n                f'{self.num_workers}', self.logger)\n        else:\n            print_log(\n                f'Overall fps: {fps_list_[0]:.1f} batch/s, '\n                f'times per batch: {1000 / fps_list_[0]:.1f} ms/batch, '\n                f'batch size: {self.batch_size}, num_workers: '\n                f'{self.num_workers}', self.logger)\n\n        print_process_memory(self._process, self.logger)\n\n        return outputs\n\n\nclass DatasetBenchmark(BaseBenchmark):\n    \"\"\"The dataset benchmark class. It will be statistical inference FPS, FPS\n    pre transform and CPU memory information.\n\n    Args:\n        cfg (mmengine.Config): config.\n        dataset_type (str): benchmark data type, only supports ``train``,\n            ``val`` and ``test``.\n        max_iter (int): maximum iterations of benchmark. Defaults to 2000.\n        log_interval (int): interval of logging. Defaults to 50.\n        num_warmup (int): Number of Warmup. Defaults to 5.\n        logger (MMLogger, optional): Formatted logger used to record messages.\n    \"\"\"\n\n    def __init__(self,\n                 cfg: Config,\n                 dataset_type: str,\n                 max_iter: int = 2000,\n                 log_interval: int = 50,\n                 num_warmup: int = 5,\n                 logger: Optional[MMLogger] = None):\n        super().__init__(max_iter, log_interval, num_warmup, logger)\n        assert dataset_type in ['train', 'val', 'test'], \\\n            'dataset_type only supports train,' \\\n            f' val and test, but got {dataset_type}'\n        assert get_world_size(\n        ) == 1, 'Dataset benchmark does not allow distributed multi-GPU'\n        self.cfg = copy.deepcopy(cfg)\n\n        if dataset_type == 'train':\n            dataloader_cfg = copy.deepcopy(cfg.train_dataloader)\n        elif dataset_type == 'test':\n            dataloader_cfg = copy.deepcopy(cfg.test_dataloader)\n        else:\n            dataloader_cfg = copy.deepcopy(cfg.val_dataloader)\n\n        dataset_cfg = dataloader_cfg.pop('dataset')\n        dataset = DATASETS.build(dataset_cfg)\n        if hasattr(dataset, 'full_init'):\n            dataset.full_init()\n        self.dataset = dataset\n\n    def run_once(self) -> dict:\n        \"\"\"Executes the benchmark once.\"\"\"\n        pure_inf_time = 0\n        fps = 0\n\n        total_index = list(range(len(self.dataset)))\n        np.random.shuffle(total_index)\n\n        start_time = time.perf_counter()\n        for i, idx in enumerate(total_index):\n            if (i + 1) % self.log_interval == 0:\n                print_log('==================================', self.logger)\n\n            get_data_info_start_time = time.perf_counter()\n            data_info = self.dataset.get_data_info(idx)\n            get_data_info_elapsed = time.perf_counter(\n            ) - get_data_info_start_time\n\n            if (i + 1) % 
self.log_interval == 0:\n                print_log(f'get_data_info - {get_data_info_elapsed * 1000} ms',\n                          self.logger)\n\n            for t in self.dataset.pipeline.transforms:\n                transform_start_time = time.perf_counter()\n                data_info = t(data_info)\n                transform_elapsed = time.perf_counter() - transform_start_time\n\n                if (i + 1) % self.log_interval == 0:\n                    print_log(\n                        f'{t.__class__.__name__} - '\n                        f'{transform_elapsed * 1000} ms', self.logger)\n\n                if data_info is None:\n                    break\n\n            elapsed = time.perf_counter() - start_time\n\n            if i >= self.num_warmup:\n                pure_inf_time += elapsed\n                if (i + 1) % self.log_interval == 0:\n                    fps = (i + 1 - self.num_warmup) / pure_inf_time\n\n                    print_log(\n                        f'Done img [{i + 1:<3}/{self.max_iter}], '\n                        f'fps: {fps:.1f} img/s, '\n                        f'times per img: {1000 / fps:.1f} ms/img', self.logger)\n\n            if (i + 1) == self.max_iter:\n                fps = (i + 1 - self.num_warmup) / pure_inf_time\n                break\n\n            start_time = time.perf_counter()\n\n        return {'fps': fps}\n\n    def average_multiple_runs(self, results: List[dict]) -> dict:\n        \"\"\"Average the results of multiple runs.\"\"\"\n        print_log('============== Done ==================', self.logger)\n\n        fps_list_ = [round(result['fps'], 1) for result in results]\n        avg_fps_ = sum(fps_list_) / len(fps_list_)\n        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}\n\n        if len(fps_list_) > 1:\n            times_pre_image_list_ = [\n                round(1000 / result['fps'], 1) for result in results\n            ]\n            avg_times_pre_image_ = sum(times_pre_image_list_) / len(\n                times_pre_image_list_)\n\n            print_log(\n                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '\n                'times per img: '\n                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '\n                'ms/img', self.logger)\n        else:\n            print_log(\n                f'Overall fps: {fps_list_[0]:.1f} img/s, '\n                f'times per img: {1000 / fps_list_[0]:.1f} ms/img',\n                self.logger)\n\n        return outputs\n"
  },
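A sketch of how `InferenceBenchmark` is meant to be driven (the config and checkpoint paths below are placeholders; a CUDA device, the dataset referenced by the config, and an `env_cfg` section in the config are assumed):

```python
from mmengine.config import Config

from mmdet.utils.benchmark import InferenceBenchmark

cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
benchmark = InferenceBenchmark(
    cfg,
    checkpoint='retinanet_r50_fpn_1x_coco.pth',  # placeholder path
    distributed=False,
    is_fuse_conv_bn=False,
    max_iter=200,
    log_interval=50)

# run() executes run_once() `repeat_num` times and averages the measured FPS.
results = benchmark.run(repeat_num=2)
print(results['avg_fps'], results['fps_list'])
```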
  {
    "path": "mmdet/utils/collect_env.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmengine.utils import get_git_hash\nfrom mmengine.utils.dl_utils import collect_env as collect_base_env\n\nimport mmdet\n\n\ndef collect_env():\n    \"\"\"Collect the information of the running environments.\"\"\"\n    env_info = collect_base_env()\n    env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]\n    return env_info\n\n\nif __name__ == '__main__':\n    for name, val in collect_env().items():\n        print(f'{name}: {val}')\n"
  },
  {
    "path": "mmdet/utils/compat_config.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nfrom mmengine.config import ConfigDict\n\n\ndef compat_cfg(cfg):\n    \"\"\"This function would modify some filed to keep the compatibility of\n    config.\n\n    For example, it will move some args which will be deprecated to the correct\n    fields.\n    \"\"\"\n    cfg = copy.deepcopy(cfg)\n    cfg = compat_imgs_per_gpu(cfg)\n    cfg = compat_loader_args(cfg)\n    cfg = compat_runner_args(cfg)\n    return cfg\n\n\ndef compat_runner_args(cfg):\n    if 'runner' not in cfg:\n        cfg.runner = ConfigDict({\n            'type': 'EpochBasedRunner',\n            'max_epochs': cfg.total_epochs\n        })\n        warnings.warn(\n            'config is now expected to have a `runner` section, '\n            'please set `runner` in your config.', UserWarning)\n    else:\n        if 'total_epochs' in cfg:\n            assert cfg.total_epochs == cfg.runner.max_epochs\n    return cfg\n\n\ndef compat_imgs_per_gpu(cfg):\n    cfg = copy.deepcopy(cfg)\n    if 'imgs_per_gpu' in cfg.data:\n        warnings.warn('\"imgs_per_gpu\" is deprecated in MMDet V2.0. '\n                      'Please use \"samples_per_gpu\" instead')\n        if 'samples_per_gpu' in cfg.data:\n            warnings.warn(\n                f'Got \"imgs_per_gpu\"={cfg.data.imgs_per_gpu} and '\n                f'\"samples_per_gpu\"={cfg.data.samples_per_gpu}, \"imgs_per_gpu\"'\n                f'={cfg.data.imgs_per_gpu} is used in this experiments')\n        else:\n            warnings.warn('Automatically set \"samples_per_gpu\"=\"imgs_per_gpu\"='\n                          f'{cfg.data.imgs_per_gpu} in this experiments')\n        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu\n    return cfg\n\n\ndef compat_loader_args(cfg):\n    \"\"\"Deprecated sample_per_gpu in cfg.data.\"\"\"\n\n    cfg = copy.deepcopy(cfg)\n    if 'train_dataloader' not in cfg.data:\n        cfg.data['train_dataloader'] = ConfigDict()\n    if 'val_dataloader' not in cfg.data:\n        cfg.data['val_dataloader'] = ConfigDict()\n    if 'test_dataloader' not in cfg.data:\n        cfg.data['test_dataloader'] = ConfigDict()\n\n    # special process for train_dataloader\n    if 'samples_per_gpu' in cfg.data:\n\n        samples_per_gpu = cfg.data.pop('samples_per_gpu')\n        assert 'samples_per_gpu' not in \\\n               cfg.data.train_dataloader, ('`samples_per_gpu` are set '\n                                           'in `data` field and ` '\n                                           'data.train_dataloader` '\n                                           'at the same time. '\n                                           'Please only set it in '\n                                           '`data.train_dataloader`. ')\n        cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu\n\n    if 'persistent_workers' in cfg.data:\n\n        persistent_workers = cfg.data.pop('persistent_workers')\n        assert 'persistent_workers' not in \\\n               cfg.data.train_dataloader, ('`persistent_workers` are set '\n                                           'in `data` field and ` '\n                                           'data.train_dataloader` '\n                                           'at the same time. '\n                                           'Please only set it in '\n                                           '`data.train_dataloader`. 
')\n        cfg.data.train_dataloader['persistent_workers'] = persistent_workers\n\n    if 'workers_per_gpu' in cfg.data:\n\n        workers_per_gpu = cfg.data.pop('workers_per_gpu')\n        cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu\n        cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu\n        cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu\n\n    # special process for val_dataloader\n    if 'samples_per_gpu' in cfg.data.val:\n        # keep default value of `sample_per_gpu` is 1\n        assert 'samples_per_gpu' not in \\\n               cfg.data.val_dataloader, ('`samples_per_gpu` are set '\n                                         'in `data.val` field and ` '\n                                         'data.val_dataloader` at '\n                                         'the same time. '\n                                         'Please only set it in '\n                                         '`data.val_dataloader`. ')\n        cfg.data.val_dataloader['samples_per_gpu'] = \\\n            cfg.data.val.pop('samples_per_gpu')\n    # special process for val_dataloader\n\n    # in case the test dataset is concatenated\n    if isinstance(cfg.data.test, dict):\n        if 'samples_per_gpu' in cfg.data.test:\n            assert 'samples_per_gpu' not in \\\n                   cfg.data.test_dataloader, ('`samples_per_gpu` are set '\n                                              'in `data.test` field and ` '\n                                              'data.test_dataloader` '\n                                              'at the same time. '\n                                              'Please only set it in '\n                                              '`data.test_dataloader`. ')\n\n            cfg.data.test_dataloader['samples_per_gpu'] = \\\n                cfg.data.test.pop('samples_per_gpu')\n\n    elif isinstance(cfg.data.test, list):\n        for ds_cfg in cfg.data.test:\n            if 'samples_per_gpu' in ds_cfg:\n                assert 'samples_per_gpu' not in \\\n                       cfg.data.test_dataloader, ('`samples_per_gpu` are set '\n                                                  'in `data.test` field and ` '\n                                                  'data.test_dataloader` at'\n                                                  ' the same time. '\n                                                  'Please only set it in '\n                                                  '`data.test_dataloader`. ')\n        samples_per_gpu = max(\n            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])\n        cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu\n\n    return cfg\n"
  },
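The following sketch illustrates what `compat_cfg` does to a legacy-style config (the toy config below is illustrative): the deprecated top-level `samples_per_gpu`/`workers_per_gpu` keys are moved into the per-split dataloader sections and a `runner` section is synthesized from `total_epochs`.

```python
from mmengine.config import Config

from mmdet.utils import compat_cfg

legacy = Config(
    dict(
        total_epochs=12,
        data=dict(
            samples_per_gpu=2,
            workers_per_gpu=2,
            train=dict(),
            val=dict(),
            test=dict())))

new_cfg = compat_cfg(legacy)  # emits deprecation warnings
assert new_cfg.data.train_dataloader.samples_per_gpu == 2
assert new_cfg.data.val_dataloader.workers_per_gpu == 2
assert new_cfg.runner.max_epochs == 12
```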
  {
    "path": "mmdet/utils/contextmanagers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport asyncio\nimport contextlib\nimport logging\nimport os\nimport time\nfrom typing import List\n\nimport torch\n\nlogger = logging.getLogger(__name__)\n\nDEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))\n\n\n@contextlib.asynccontextmanager\nasync def completed(trace_name='',\n                    name='',\n                    sleep_interval=0.05,\n                    streams: List[torch.cuda.Stream] = None):\n    \"\"\"Async context manager that waits for work to complete on given CUDA\n    streams.\"\"\"\n    if not torch.cuda.is_available():\n        yield\n        return\n\n    stream_before_context_switch = torch.cuda.current_stream()\n    if not streams:\n        streams = [stream_before_context_switch]\n    else:\n        streams = [s if s else stream_before_context_switch for s in streams]\n\n    end_events = [\n        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams\n    ]\n\n    if DEBUG_COMPLETED_TIME:\n        start = torch.cuda.Event(enable_timing=True)\n        stream_before_context_switch.record_event(start)\n\n        cpu_start = time.monotonic()\n    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)\n    grad_enabled_before = torch.is_grad_enabled()\n    try:\n        yield\n    finally:\n        current_stream = torch.cuda.current_stream()\n        assert current_stream == stream_before_context_switch\n\n        if DEBUG_COMPLETED_TIME:\n            cpu_end = time.monotonic()\n        for i, stream in enumerate(streams):\n            event = end_events[i]\n            stream.record_event(event)\n\n        grad_enabled_after = torch.is_grad_enabled()\n\n        # observed change of torch.is_grad_enabled() during concurrent run of\n        # async_test_bboxes code\n        assert (grad_enabled_before == grad_enabled_after\n                ), 'Unexpected is_grad_enabled() value change'\n\n        are_done = [e.query() for e in end_events]\n        logger.debug('%s %s completed: %s streams: %s', trace_name, name,\n                     are_done, streams)\n        with torch.cuda.stream(stream_before_context_switch):\n            while not all(are_done):\n                await asyncio.sleep(sleep_interval)\n                are_done = [e.query() for e in end_events]\n                logger.debug(\n                    '%s %s completed: %s streams: %s',\n                    trace_name,\n                    name,\n                    are_done,\n                    streams,\n                )\n\n        current_stream = torch.cuda.current_stream()\n        assert current_stream == stream_before_context_switch\n\n        if DEBUG_COMPLETED_TIME:\n            cpu_time = (cpu_end - cpu_start) * 1000\n            stream_times_ms = ''\n            for i, stream in enumerate(streams):\n                elapsed_time = start.elapsed_time(end_events[i])\n                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'\n            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,\n                        stream_times_ms)\n\n\n@contextlib.asynccontextmanager\nasync def concurrent(streamqueue: asyncio.Queue,\n                     trace_name='concurrent',\n                     name='stream'):\n    \"\"\"Run code concurrently in different streams.\n\n    :param streamqueue: asyncio.Queue instance.\n\n    Queue tasks define the pool of streams used for concurrent execution.\n    \"\"\"\n    if not torch.cuda.is_available():\n        yield\n        
return\n\n    initial_stream = torch.cuda.current_stream()\n\n    with torch.cuda.stream(initial_stream):\n        stream = await streamqueue.get()\n        assert isinstance(stream, torch.cuda.Stream)\n\n        try:\n            with torch.cuda.stream(stream):\n                logger.debug('%s %s is starting, stream: %s', trace_name, name,\n                             stream)\n                yield\n                current = torch.cuda.current_stream()\n                assert current == stream\n                logger.debug('%s %s has finished, stream: %s', trace_name,\n                             name, stream)\n        finally:\n            streamqueue.task_done()\n            streamqueue.put_nowait(stream)\n"
  },
  {
    "path": "mmdet/utils/dist_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport functools\nimport pickle\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom mmengine.dist import get_dist_info\nfrom torch._utils import (_flatten_dense_tensors, _take_tensors,\n                          _unflatten_dense_tensors)\n\n\ndef _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):\n    if bucket_size_mb > 0:\n        bucket_size_bytes = bucket_size_mb * 1024 * 1024\n        buckets = _take_tensors(tensors, bucket_size_bytes)\n    else:\n        buckets = OrderedDict()\n        for tensor in tensors:\n            tp = tensor.type()\n            if tp not in buckets:\n                buckets[tp] = []\n            buckets[tp].append(tensor)\n        buckets = buckets.values()\n\n    for bucket in buckets:\n        flat_tensors = _flatten_dense_tensors(bucket)\n        dist.all_reduce(flat_tensors)\n        flat_tensors.div_(world_size)\n        for tensor, synced in zip(\n                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):\n            tensor.copy_(synced)\n\n\ndef allreduce_grads(params, coalesce=True, bucket_size_mb=-1):\n    \"\"\"Allreduce gradients.\n\n    Args:\n        params (list[torch.Parameters]): List of parameters of a model\n        coalesce (bool, optional): Whether allreduce parameters as a whole.\n            Defaults to True.\n        bucket_size_mb (int, optional): Size of bucket, the unit is MB.\n            Defaults to -1.\n    \"\"\"\n    grads = [\n        param.grad.data for param in params\n        if param.requires_grad and param.grad is not None\n    ]\n    world_size = dist.get_world_size()\n    if coalesce:\n        _allreduce_coalesced(grads, world_size, bucket_size_mb)\n    else:\n        for tensor in grads:\n            dist.all_reduce(tensor.div_(world_size))\n\n\ndef reduce_mean(tensor):\n    \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n    if not (dist.is_available() and dist.is_initialized()):\n        return tensor\n    tensor = tensor.clone()\n    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n    return tensor\n\n\ndef obj2tensor(pyobj, device='cuda'):\n    \"\"\"Serialize picklable python object to tensor.\"\"\"\n    storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))\n    return torch.ByteTensor(storage).to(device=device)\n\n\ndef tensor2obj(tensor):\n    \"\"\"Deserialize tensor to picklable python object.\"\"\"\n    return pickle.loads(tensor.cpu().numpy().tobytes())\n\n\n@functools.lru_cache()\ndef _get_global_gloo_group():\n    \"\"\"Return a process group based on gloo backend, containing all the ranks\n    The result is cached.\"\"\"\n    if dist.get_backend() == 'nccl':\n        return dist.new_group(backend='gloo')\n    else:\n        return dist.group.WORLD\n\n\ndef all_reduce_dict(py_dict, op='sum', group=None, to_float=True):\n    \"\"\"Apply all reduce function for python dict object.\n\n    The code is modified from https://github.com/Megvii-\n    BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.\n\n    NOTE: make sure that py_dict in different ranks has the same keys and\n    the values should be in the same shape. Currently only supports\n    nccl backend.\n\n    Args:\n        py_dict (dict): Dict to be applied all reduce op.\n        op (str): Operator, could be 'sum' or 'mean'. 
Default: 'sum'\n        group (:obj:`torch.distributed.group`, optional): Distributed group,\n            Default: None.\n        to_float (bool): Whether to convert all values of dict to float.\n            Default: True.\n\n    Returns:\n        OrderedDict: reduced python dict object.\n    \"\"\"\n    warnings.warn(\n        'group` is deprecated. Currently only supports NCCL backend.')\n    _, world_size = get_dist_info()\n    if world_size == 1:\n        return py_dict\n\n    # all reduce logic across different devices.\n    py_key = list(py_dict.keys())\n    if not isinstance(py_dict, OrderedDict):\n        py_key_tensor = obj2tensor(py_key)\n        dist.broadcast(py_key_tensor, src=0)\n        py_key = tensor2obj(py_key_tensor)\n\n    tensor_shapes = [py_dict[k].shape for k in py_key]\n    tensor_numels = [py_dict[k].numel() for k in py_key]\n\n    if to_float:\n        warnings.warn('Note: the \"to_float\" is True, you need to '\n                      'ensure that the behavior is reasonable.')\n        flatten_tensor = torch.cat(\n            [py_dict[k].flatten().float() for k in py_key])\n    else:\n        flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])\n\n    dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)\n    if op == 'mean':\n        flatten_tensor /= world_size\n\n    split_tensors = [\n        x.reshape(shape) for x, shape in zip(\n            torch.split(flatten_tensor, tensor_numels), tensor_shapes)\n    ]\n    out_dict = {k: v for k, v in zip(py_key, split_tensors)}\n    if isinstance(py_dict, OrderedDict):\n        out_dict = OrderedDict(out_dict)\n    return out_dict\n\n\ndef sync_random_seed(seed=None, device='cuda'):\n    \"\"\"Make sure different ranks share the same seed.\n\n    All workers must call this function, otherwise it will deadlock.\n    This method is generally used in `DistributedSampler`,\n    because the seed should be identical across all processes\n    in the distributed group.\n\n    In distributed sampling, different ranks should sample non-overlapped\n    data in the dataset. Therefore, this function is used to make sure that\n    each rank shuffles the data indices in the same order based\n    on the same seed. Then different ranks could use different indices\n    to select non-overlapped data from the same data list.\n\n    Args:\n        seed (int, Optional): The seed. Default to None.\n        device (str): The device where the seed will be put on.\n            Default to 'cuda'.\n\n    Returns:\n        int: Seed to be used.\n    \"\"\"\n    if seed is None:\n        seed = np.random.randint(2**31)\n    assert isinstance(seed, int)\n\n    rank, world_size = get_dist_info()\n\n    if world_size == 1:\n        return seed\n\n    if rank == 0:\n        random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n    else:\n        random_num = torch.tensor(0, dtype=torch.int32, device=device)\n    dist.broadcast(random_num, src=0)\n    return random_num.item()\n"
  },
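The distributed helpers above are easiest to see from a call site. Below is a minimal, hypothetical sketch (not part of the repository) of how `reduce_mean` and `all_reduce_dict` are typically used to average per-rank statistics; it assumes the process group has already been initialized by the training launcher and that tensors live on CUDA devices.

```python
# Hypothetical usage sketch for mmdet.utils.dist_utils; assumes torch.distributed
# is already initialized (e.g. by mmengine's launcher) and CUDA is in use.
import torch

from mmdet.utils.dist_utils import all_reduce_dict, reduce_mean

# Average a per-rank scalar (e.g. number of positive samples) across GPUs.
num_pos = torch.tensor(128.0, device='cuda')
avg_num_pos = reduce_mean(num_pos)  # identical value on every rank

# Average a dict of same-shaped tensors (e.g. accumulated statistics) across ranks.
stats = {'mean': torch.zeros(4, device='cuda'), 'var': torch.ones(4, device='cuda')}
stats = all_reduce_dict(stats, op='mean')
```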
  {
    "path": "mmdet/utils/logger.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport inspect\n\nfrom mmengine.logging import print_log\n\n\ndef get_caller_name():\n    \"\"\"Get name of caller method.\"\"\"\n    # this_func_frame = inspect.stack()[0][0]  # i.e., get_caller_name\n    # callee_frame = inspect.stack()[1][0]  # e.g., log_img_scale\n    caller_frame = inspect.stack()[2][0]  # e.g., caller of log_img_scale\n    caller_method = caller_frame.f_code.co_name\n    try:\n        caller_class = caller_frame.f_locals['self'].__class__.__name__\n        return f'{caller_class}.{caller_method}'\n    except KeyError:  # caller is a function\n        return caller_method\n\n\ndef log_img_scale(img_scale, shape_order='hw', skip_square=False):\n    \"\"\"Log image size.\n\n    Args:\n        img_scale (tuple): Image size to be logged.\n        shape_order (str, optional): The order of image shape.\n            'hw' for (height, width) and 'wh' for (width, height).\n            Defaults to 'hw'.\n        skip_square (bool, optional): Whether to skip logging for square\n            img_scale. Defaults to False.\n\n    Returns:\n        bool: Whether to have done logging.\n    \"\"\"\n    if shape_order == 'hw':\n        height, width = img_scale\n    elif shape_order == 'wh':\n        width, height = img_scale\n    else:\n        raise ValueError(f'Invalid shape_order {shape_order}.')\n\n    if skip_square and (height == width):\n        return False\n\n    caller = get_caller_name()\n    print_log(\n        f'image shape: height={height}, width={width} in {caller}',\n        logger='current')\n\n    return True\n"
  },
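A small, assumed usage example of `log_img_scale` (the call sites below are illustrative, not from the repository): it logs the image size together with the name of the calling function or method resolved by `get_caller_name`, and can skip square scales.

```python
# Illustrative only: log_img_scale reports the (h, w) an augmentation uses,
# tagged with the caller's name.
from mmdet.utils.logger import log_img_scale

log_img_scale((640, 1333), shape_order='hw')   # logged with the caller's name
log_img_scale((640, 640), skip_square=True)    # returns False, nothing is logged
```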
  {
    "path": "mmdet/utils/memory.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom collections import abc\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport torch\nfrom mmengine.logging import MMLogger\n\n\ndef cast_tensor_type(inputs, src_type=None, dst_type=None):\n    \"\"\"Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.\n\n    Args:\n        inputs: Inputs that to be casted.\n        src_type (torch.dtype | torch.device): Source type.\n        src_type (torch.dtype | torch.device): Destination type.\n\n    Returns:\n        The same type with inputs, but all contained Tensors have been cast.\n    \"\"\"\n    assert dst_type is not None\n    if isinstance(inputs, torch.Tensor):\n        if isinstance(dst_type, torch.device):\n            # convert Tensor to dst_device\n            if hasattr(inputs, 'to') and \\\n                    hasattr(inputs, 'device') and \\\n                    (inputs.device == src_type or src_type is None):\n                return inputs.to(dst_type)\n            else:\n                return inputs\n        else:\n            # convert Tensor to dst_dtype\n            if hasattr(inputs, 'to') and \\\n                    hasattr(inputs, 'dtype') and \\\n                    (inputs.dtype == src_type or src_type is None):\n                return inputs.to(dst_type)\n            else:\n                return inputs\n        # we need to ensure that the type of inputs to be casted are the same\n        # as the argument `src_type`.\n    elif isinstance(inputs, abc.Mapping):\n        return type(inputs)({\n            k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)\n            for k, v in inputs.items()\n        })\n    elif isinstance(inputs, abc.Iterable):\n        return type(inputs)(\n            cast_tensor_type(item, src_type=src_type, dst_type=dst_type)\n            for item in inputs)\n    # TODO: Currently not supported\n    # elif isinstance(inputs, InstanceData):\n    #     for key, value in inputs.items():\n    #         inputs[key] = cast_tensor_type(\n    #             value, src_type=src_type, dst_type=dst_type)\n    #     return inputs\n    else:\n        return inputs\n\n\n@contextmanager\ndef _ignore_torch_cuda_oom():\n    \"\"\"A context which ignores CUDA OOM exception from pytorch.\n\n    Code is modified from\n    <https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py>  # noqa: E501\n    \"\"\"\n    try:\n        yield\n    except RuntimeError as e:\n        # NOTE: the string may change?\n        if 'CUDA out of memory. ' in str(e):\n            pass\n        else:\n            raise\n\n\nclass AvoidOOM:\n    \"\"\"Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of\n    Memory error. It will do the following steps:\n\n        1. First retry after calling `torch.cuda.empty_cache()`.\n        2. If that still fails, it will then retry by converting inputs\n          to FP16.\n        3. If that still fails trying to convert inputs to CPUs.\n          In this case, it expects the function to dispatch to\n          CPU implementation.\n\n    Args:\n        to_cpu (bool): Whether to convert outputs to CPU if get an OOM\n            error. This will slow down the code significantly.\n            Defaults to True.\n        test (bool): Skip `_ignore_torch_cuda_oom` operate that can use\n            lightweight data in unit test, only used in\n            test unit. 
Defaults to False.\n\n    Examples:\n        >>> from mmdet.utils.memory import AvoidOOM\n        >>> AvoidCUDAOOM = AvoidOOM()\n        >>> output = AvoidOOM.retry_if_cuda_oom(\n        >>>     some_torch_function)(input1, input2)\n        >>> # To use as a decorator\n        >>> # from mmdet.utils import AvoidCUDAOOM\n        >>> @AvoidCUDAOOM.retry_if_cuda_oom\n        >>> def function(*args, **kwargs):\n        >>>     return None\n    ```\n\n    Note:\n        1. The output may be on CPU even if inputs are on GPU. Processing\n            on CPU will slow down the code significantly.\n        2. When converting inputs to CPU, it will only look at each argument\n            and check if it has `.device` and `.to` for conversion. Nested\n            structures of tensors are not supported.\n        3. Since the function might be called more than once, it has to be\n            stateless.\n    \"\"\"\n\n    def __init__(self, to_cpu=True, test=False):\n        self.to_cpu = to_cpu\n        self.test = test\n\n    def retry_if_cuda_oom(self, func):\n        \"\"\"Makes a function retry itself after encountering pytorch's CUDA OOM\n        error.\n\n        The implementation logic is referred to\n        https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py\n\n        Args:\n            func: a stateless callable that takes tensor-like objects\n                as arguments.\n        Returns:\n            func: a callable which retries `func` if OOM is encountered.\n        \"\"\"  # noqa: W605\n\n        @wraps(func)\n        def wrapped(*args, **kwargs):\n\n            # raw function\n            if not self.test:\n                with _ignore_torch_cuda_oom():\n                    return func(*args, **kwargs)\n\n                # Clear cache and retry\n                torch.cuda.empty_cache()\n                with _ignore_torch_cuda_oom():\n                    return func(*args, **kwargs)\n\n            # get the type and device of first tensor\n            dtype, device = None, None\n            values = args + tuple(kwargs.values())\n            for value in values:\n                if isinstance(value, torch.Tensor):\n                    dtype = value.dtype\n                    device = value.device\n                    break\n            if dtype is None or device is None:\n                raise ValueError('There is no tensor in the inputs, '\n                                 'cannot get dtype and device.')\n\n            # Convert to FP16\n            fp16_args = cast_tensor_type(args, dst_type=torch.half)\n            fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)\n            logger = MMLogger.get_current_instance()\n            logger.warning(f'Attempting to copy inputs of {str(func)} '\n                           'to FP16 due to CUDA OOM')\n\n            # get input tensor type, the output type will same as\n            # the first parameter type.\n            with _ignore_torch_cuda_oom():\n                output = func(*fp16_args, **fp16_kwargs)\n                output = cast_tensor_type(\n                    output, src_type=torch.half, dst_type=dtype)\n                if not self.test:\n                    return output\n            logger.warning('Using FP16 still meet CUDA OOM')\n\n            # Try on CPU. 
This will slow down the code significantly,\n            # therefore print a notice.\n            if self.to_cpu:\n                logger.warning(f'Attempting to copy inputs of {str(func)} '\n                               'to CPU due to CUDA OOM')\n                cpu_device = torch.empty(0).device\n                cpu_args = cast_tensor_type(args, dst_type=cpu_device)\n                cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)\n\n                # convert outputs to GPU\n                with _ignore_torch_cuda_oom():\n                    logger.warning(f'Convert outputs to GPU (device={device})')\n                    output = func(*cpu_args, **cpu_kwargs)\n                    output = cast_tensor_type(\n                        output, src_type=cpu_device, dst_type=device)\n                    return output\n\n                warnings.warn('Cannot convert output to GPU due to CUDA OOM, '\n                              'the output is now on CPU, which might cause '\n                              'errors if the output need to interact with GPU '\n                              'data in subsequent operations')\n                logger.warning('Cannot convert output to GPU due to '\n                               'CUDA OOM, the output is on CPU now.')\n\n                return func(*cpu_args, **cpu_kwargs)\n            else:\n                # may still get CUDA OOM error\n                return func(*args, **kwargs)\n\n        return wrapped\n\n\n# To use AvoidOOM as a decorator\nAvoidCUDAOOM = AvoidOOM()\n"
  },
  {
    "path": "mmdet/utils/misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport glob\nimport os\nimport os.path as osp\nimport warnings\nfrom typing import Union\n\nfrom mmengine.config import Config, ConfigDict\nfrom mmengine.logging import print_log\n\n\ndef find_latest_checkpoint(path, suffix='pth'):\n    \"\"\"Find the latest checkpoint from the working directory.\n\n    Args:\n        path(str): The path to find checkpoints.\n        suffix(str): File extension.\n            Defaults to pth.\n\n    Returns:\n        latest_path(str | None): File path of the latest checkpoint.\n    References:\n        .. [1] https://github.com/microsoft/SoftTeacher\n                  /blob/main/ssod/utils/patch.py\n    \"\"\"\n    if not osp.exists(path):\n        warnings.warn('The path of checkpoints does not exist.')\n        return None\n    if osp.exists(osp.join(path, f'latest.{suffix}')):\n        return osp.join(path, f'latest.{suffix}')\n\n    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))\n    if len(checkpoints) == 0:\n        warnings.warn('There are no checkpoints in the path.')\n        return None\n    latest = -1\n    latest_path = None\n    for checkpoint in checkpoints:\n        count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])\n        if count > latest:\n            latest = count\n            latest_path = checkpoint\n    return latest_path\n\n\ndef update_data_root(cfg, logger=None):\n    \"\"\"Update data root according to env MMDET_DATASETS.\n\n    If set env MMDET_DATASETS, update cfg.data_root according to\n    MMDET_DATASETS. Otherwise, using cfg.data_root as default.\n\n    Args:\n        cfg (:obj:`Config`): The model config need to modify\n        logger (logging.Logger | str | None): the way to print msg\n    \"\"\"\n    assert isinstance(cfg, Config), \\\n        f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'\n\n    if 'MMDET_DATASETS' in os.environ:\n        dst_root = os.environ['MMDET_DATASETS']\n        print_log(f'MMDET_DATASETS has been set to be {dst_root}.'\n                  f'Using {dst_root} as data root.')\n    else:\n        return\n\n    assert isinstance(cfg, Config), \\\n        f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'\n\n    def update(cfg, src_str, dst_str):\n        for k, v in cfg.items():\n            if isinstance(v, ConfigDict):\n                update(cfg[k], src_str, dst_str)\n            if isinstance(v, str) and src_str in v:\n                cfg[k] = v.replace(src_str, dst_str)\n\n    update(cfg.data, cfg.data_root, dst_root)\n    cfg.data_root = dst_root\n\n\ndef get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:\n    \"\"\"Get the test dataset pipeline from entire config.\n\n    Args:\n        cfg (str or :obj:`ConfigDict`): the entire config. 
Can be a config\n            file or a ``ConfigDict``.\n\n    Returns:\n        :obj:`ConfigDict`: the config of test dataset.\n    \"\"\"\n    if isinstance(cfg, str):\n        cfg = Config.fromfile(cfg)\n\n    def _get_test_pipeline_cfg(dataset_cfg):\n        if 'pipeline' in dataset_cfg:\n            return dataset_cfg.pipeline\n        # handle dataset wrapper\n        elif 'dataset' in dataset_cfg:\n            return _get_test_pipeline_cfg(dataset_cfg.dataset)\n        # handle dataset wrappers like ConcatDataset\n        elif 'datasets' in dataset_cfg:\n            return _get_test_pipeline_cfg(dataset_cfg.datasets[0])\n\n        raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')\n\n    return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)\n"
  },
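As a rough illustration of the helpers above, the snippet below (the work directory and config path are placeholders) resumes from the newest checkpoint in a work directory and pulls the test pipeline out of a full config.

```python
# Hypothetical example for find_latest_checkpoint / get_test_pipeline_cfg;
# 'work_dirs/retinanet' and the config path are assumed, not prescribed.
from mmdet.utils.misc import find_latest_checkpoint, get_test_pipeline_cfg

ckpt = find_latest_checkpoint('work_dirs/retinanet')   # e.g. '.../epoch_12.pth' or None
if ckpt is not None:
    print(f'Resuming from {ckpt}')

test_pipeline = get_test_pipeline_cfg('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
```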
  {
    "path": "mmdet/utils/profiling.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport contextlib\nimport sys\nimport time\n\nimport torch\n\nif sys.version_info >= (3, 7):\n\n    @contextlib.contextmanager\n    def profile_time(trace_name,\n                     name,\n                     enabled=True,\n                     stream=None,\n                     end_stream=None):\n        \"\"\"Print time spent by CPU and GPU.\n\n        Useful as a temporary context manager to find sweet spots of code\n        suitable for async implementation.\n        \"\"\"\n        if (not enabled) or not torch.cuda.is_available():\n            yield\n            return\n        stream = stream if stream else torch.cuda.current_stream()\n        end_stream = end_stream if end_stream else stream\n        start = torch.cuda.Event(enable_timing=True)\n        end = torch.cuda.Event(enable_timing=True)\n        stream.record_event(start)\n        try:\n            cpu_start = time.monotonic()\n            yield\n        finally:\n            cpu_end = time.monotonic()\n            end_stream.record_event(end)\n            end.synchronize()\n            cpu_time = (cpu_end - cpu_start) * 1000\n            gpu_time = start.elapsed_time(end)\n            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '\n            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'\n            print(msg, end_stream)\n"
  },
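A minimal sketch of `profile_time` in use (the tensor size and names are arbitrary); when CUDA is unavailable or `enabled=False`, the context manager is a no-op.

```python
# Sketch: time a CUDA region on both CPU and GPU with profile_time.
import torch

from mmdet.utils.profiling import profile_time

x = torch.randn(1024, 1024, device='cuda' if torch.cuda.is_available() else 'cpu')
with profile_time('demo', 'matmul', enabled=torch.cuda.is_available()):
    y = x @ x
```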
  {
    "path": "mmdet/utils/replace_cfg_vals.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport re\n\nfrom mmengine.config import Config\n\n\ndef replace_cfg_vals(ori_cfg):\n    \"\"\"Replace the string \"${key}\" with the corresponding value.\n\n    Replace the \"${key}\" with the value of ori_cfg.key in the config. And\n    support replacing the chained ${key}. Such as, replace \"${key0.key1}\"\n    with the value of cfg.key0.key1. Code is modified from `vars.py\n    < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_  # noqa: E501\n\n    Args:\n        ori_cfg (mmengine.config.Config):\n            The origin config with \"${key}\" generated from a file.\n\n    Returns:\n        updated_cfg [mmengine.config.Config]:\n            The config with \"${key}\" replaced by the corresponding value.\n    \"\"\"\n\n    def get_value(cfg, key):\n        for k in key.split('.'):\n            cfg = cfg[k]\n        return cfg\n\n    def replace_value(cfg):\n        if isinstance(cfg, dict):\n            return {key: replace_value(value) for key, value in cfg.items()}\n        elif isinstance(cfg, list):\n            return [replace_value(item) for item in cfg]\n        elif isinstance(cfg, tuple):\n            return tuple([replace_value(item) for item in cfg])\n        elif isinstance(cfg, str):\n            # the format of string cfg may be:\n            # 1) \"${key}\", which will be replaced with cfg.key directly\n            # 2) \"xxx${key}xxx\" or \"xxx${key1}xxx${key2}xxx\",\n            # which will be replaced with the string of the cfg.key\n            keys = pattern_key.findall(cfg)\n            values = [get_value(ori_cfg, key[2:-1]) for key in keys]\n            if len(keys) == 1 and keys[0] == cfg:\n                # the format of string cfg is \"${key}\"\n                cfg = values[0]\n            else:\n                for key, value in zip(keys, values):\n                    # the format of string cfg is\n                    # \"xxx${key}xxx\" or \"xxx${key1}xxx${key2}xxx\"\n                    assert not isinstance(value, (dict, list, tuple)), \\\n                        f'for the format of string cfg is ' \\\n                        f\"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', \" \\\n                        f\"the type of the value of '${key}' \" \\\n                        f'can not be dict, list, or tuple' \\\n                        f'but you input {type(value)} in {cfg}'\n                    cfg = cfg.replace(key, str(value))\n            return cfg\n        else:\n            return cfg\n\n    # the pattern of string \"${key}\"\n    pattern_key = re.compile(r'\\$\\{[a-zA-Z\\d_.]*\\}')\n    # the type of ori_cfg._cfg_dict is mmengine.config.ConfigDict\n    updated_cfg = Config(\n        replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)\n    # replace the model with model_wrapper\n    if updated_cfg.get('model_wrapper', None) is not None:\n        updated_cfg.model = updated_cfg.model_wrapper\n        updated_cfg.pop('model_wrapper')\n    return updated_cfg\n"
  },
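To make the `${key}` substitution concrete, here is a small assumed example (the config fields are invented); chained keys such as `${model.backbone.depth}` are resolved the same way.

```python
# Toy example of replace_cfg_vals; the config values are made up.
from mmengine.config import Config

from mmdet.utils.replace_cfg_vals import replace_cfg_vals

cfg = Config(dict(work_dir='./out', log_file='${work_dir}/run.log'))
cfg = replace_cfg_vals(cfg)
print(cfg.log_file)  # './out/run.log'
```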
  {
    "path": "mmdet/utils/setup_env.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport datetime\nimport os\nimport platform\nimport warnings\n\nimport cv2\nimport torch.multiprocessing as mp\nfrom mmengine import DefaultScope\n\n\ndef setup_multi_processes(cfg):\n    \"\"\"Setup multi-processing environment variables.\"\"\"\n    # set multi-process start method as `fork` to speed up the training\n    if platform.system() != 'Windows':\n        mp_start_method = cfg.get('mp_start_method', 'fork')\n        current_method = mp.get_start_method(allow_none=True)\n        if current_method is not None and current_method != mp_start_method:\n            warnings.warn(\n                f'Multi-processing start method `{mp_start_method}` is '\n                f'different from the previous setting `{current_method}`.'\n                f'It will be force set to `{mp_start_method}`. You can change '\n                f'this behavior by changing `mp_start_method` in your config.')\n        mp.set_start_method(mp_start_method, force=True)\n\n    # disable opencv multithreading to avoid system being overloaded\n    opencv_num_threads = cfg.get('opencv_num_threads', 0)\n    cv2.setNumThreads(opencv_num_threads)\n\n    # setup OMP threads\n    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa\n    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)\n    if 'train_dataloader' in cfg.data:\n        workers_per_gpu = \\\n            max(cfg.data.train_dataloader.get('workers_per_gpu', 1),\n                workers_per_gpu)\n\n    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:\n        omp_num_threads = 1\n        warnings.warn(\n            f'Setting OMP_NUM_THREADS environment variable for each process '\n            f'to be {omp_num_threads} in default, to avoid your system being '\n            f'overloaded, please further tune the variable for optimal '\n            f'performance in your application as needed.')\n        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)\n\n    # setup MKL threads\n    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:\n        mkl_num_threads = 1\n        warnings.warn(\n            f'Setting MKL_NUM_THREADS environment variable for each process '\n            f'to be {mkl_num_threads} in default, to avoid your system being '\n            f'overloaded, please further tune the variable for optimal '\n            f'performance in your application as needed.')\n        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)\n\n\ndef register_all_modules(init_default_scope: bool = True) -> None:\n    \"\"\"Register all modules in mmdet into the registries.\n\n    Args:\n        init_default_scope (bool): Whether initialize the mmdet default scope.\n            When `init_default_scope=True`, the global default scope will be\n            set to `mmdet`, and all registries will build modules from mmdet's\n            registry node. 
To understand more about the registry, please refer\n            to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md\n            Defaults to True.\n    \"\"\"  # noqa\n    import mmdet.datasets  # noqa: F401,F403\n    import mmdet.engine  # noqa: F401,F403\n    import mmdet.evaluation  # noqa: F401,F403\n    import mmdet.models  # noqa: F401,F403\n    import mmdet.visualization  # noqa: F401,F403\n\n    if init_default_scope:\n        never_created = DefaultScope.get_current_instance() is None \\\n                        or not DefaultScope.check_instance_created('mmdet')\n        if never_created:\n            DefaultScope.get_instance('mmdet', scope_name='mmdet')\n            return\n        current_scope = DefaultScope.get_current_instance()\n        if current_scope.scope_name != 'mmdet':\n            warnings.warn('The current default scope '\n                          f'\"{current_scope.scope_name}\" is not \"mmdet\", '\n                          '`register_all_modules` will force the current '\n                          'default scope to be \"mmdet\". If this is not '\n                          'expected, please set `init_default_scope=False`.')\n            # avoid name conflict\n            new_instance_name = f'mmdet-{datetime.datetime.now()}'\n            DefaultScope.get_instance(new_instance_name, scope_name='mmdet')\n"
  },
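A short, assumed example of calling `register_all_modules` before building components from configs outside the standard entry points; with `init_default_scope=True` the `mmdet` scope becomes the default, as described in the docstring above.

```python
# Populate mmdet's registries and set 'mmdet' as the default registry scope.
from mmdet.utils.setup_env import register_all_modules

register_all_modules(init_default_scope=True)

# After this, registries such as mmdet.registry.MODELS can build mmdet
# components from configs without an explicit scope switch.
```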
  {
    "path": "mmdet/utils/split_batch.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef split_batch(img, img_metas, kwargs):\n    \"\"\"Split data_batch by tags.\n\n    Code is modified from\n    <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/structure_utils.py> # noqa: E501\n\n    Args:\n        img (Tensor): of shape (N, C, H, W) encoding input images.\n            Typically these should be mean centered and std scaled.\n        img_metas (list[dict]): List of image info dict where each dict\n            has: 'img_shape', 'scale_factor', 'flip', and may also contain\n            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n            For details on the values of these keys, see\n            :class:`mmdet.datasets.pipelines.Collect`.\n        kwargs (dict): Specific to concrete implementation.\n\n    Returns:\n        data_groups (dict): a dict that data_batch splited by tags,\n            such as 'sup', 'unsup_teacher', and 'unsup_student'.\n    \"\"\"\n\n    # only stack img in the batch\n    def fuse_list(obj_list, obj):\n        return torch.stack(obj_list) if isinstance(obj,\n                                                   torch.Tensor) else obj_list\n\n    # select data with tag from data_batch\n    def select_group(data_batch, current_tag):\n        group_flag = [tag == current_tag for tag in data_batch['tag']]\n        return {\n            k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v)\n            for k, v in data_batch.items()\n        }\n\n    kwargs.update({'img': img, 'img_metas': img_metas})\n    kwargs.update({'tag': [meta['tag'] for meta in img_metas]})\n    tags = list(set(kwargs['tag']))\n    data_groups = {tag: select_group(kwargs, tag) for tag in tags}\n    for tag, group in data_groups.items():\n        group.pop('tag')\n    return data_groups\n"
  },
  {
    "path": "mmdet/utils/typing_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Collecting some commonly used type hint in mmdetection.\"\"\"\nfrom typing import List, Optional, Sequence, Tuple, Union\n\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData, PixelData\n\n# TODO: Need to avoid circular import with assigner and sampler\n# Type hint of config data\nConfigType = Union[ConfigDict, dict]\nOptConfigType = Optional[ConfigType]\n# Type hint of one or more config data\nMultiConfig = Union[ConfigType, List[ConfigType]]\nOptMultiConfig = Optional[MultiConfig]\n\nInstanceList = List[InstanceData]\nOptInstanceList = Optional[InstanceList]\n\nPixelList = List[PixelData]\nOptPixelList = Optional[PixelList]\n\nRangeType = Sequence[Tuple[int, int]]\n"
  },
  {
    "path": "mmdet/utils/util_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"This module defines the :class:`NiceRepr` mixin class, which defines a\n``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``\nmethod, which you must define. This means you only have to overload one\nfunction instead of two.  Furthermore, if the object defines a ``__len__``\nmethod, then the ``__nice__`` method defaults to something sensible, otherwise\nit is treated as abstract and raises ``NotImplementedError``.\n\nTo use simply have your object inherit from :class:`NiceRepr`\n(multi-inheritance should be ok).\n\nThis code was copied from the ubelt library: https://github.com/Erotemic/ubelt\n\nExample:\n    >>> # Objects that define __nice__ have a default __str__ and __repr__\n    >>> class Student(NiceRepr):\n    ...    def __init__(self, name):\n    ...        self.name = name\n    ...    def __nice__(self):\n    ...        return self.name\n    >>> s1 = Student('Alice')\n    >>> s2 = Student('Bob')\n    >>> print(f's1 = {s1}')\n    >>> print(f's2 = {s2}')\n    s1 = <Student(Alice)>\n    s2 = <Student(Bob)>\n\nExample:\n    >>> # Objects that define __len__ have a default __nice__\n    >>> class Group(NiceRepr):\n    ...    def __init__(self, data):\n    ...        self.data = data\n    ...    def __len__(self):\n    ...        return len(self.data)\n    >>> g = Group([1, 2, 3])\n    >>> print(f'g = {g}')\n    g = <Group(3)>\n\"\"\"\nimport warnings\n\n\nclass NiceRepr:\n    \"\"\"Inherit from this class and define ``__nice__`` to \"nicely\" print your\n    objects.\n\n    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function\n    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.\n    If the inheriting class has a ``__len__``, method then the default\n    ``__nice__`` method will return its length.\n\n    Example:\n        >>> class Foo(NiceRepr):\n        ...    def __nice__(self):\n        ...        return 'info'\n        >>> foo = Foo()\n        >>> assert str(foo) == '<Foo(info)>'\n        >>> assert repr(foo).startswith('<Foo(info) at ')\n\n    Example:\n        >>> class Bar(NiceRepr):\n        ...    pass\n        >>> bar = Bar()\n        >>> import pytest\n        >>> with pytest.warns(None) as record:\n        >>>     assert 'object at' in str(bar)\n        >>>     assert 'object at' in repr(bar)\n\n    Example:\n        >>> class Baz(NiceRepr):\n        ...    def __len__(self):\n        ...        
return 5\n        >>> baz = Baz()\n        >>> assert str(baz) == '<Baz(5)>'\n    \"\"\"\n\n    def __nice__(self):\n        \"\"\"str: a \"nice\" summary string describing this module\"\"\"\n        if hasattr(self, '__len__'):\n            # It is a common pattern for objects to use __len__ in __nice__\n            # As a convenience we define a default __nice__ for these objects\n            return str(len(self))\n        else:\n            # In all other cases force the subclass to overload __nice__\n            raise NotImplementedError(\n                f'Define the __nice__ method for {self.__class__!r}')\n\n    def __repr__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            nice = self.__nice__()\n            classname = self.__class__.__name__\n            return f'<{classname}({nice}) at {hex(id(self))}>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n\n    def __str__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            classname = self.__class__.__name__\n            nice = self.__nice__()\n            return f'<{classname}({nice})>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n"
  },
  {
    "path": "mmdet/utils/util_random.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Helpers for random number generators.\"\"\"\nimport numpy as np\n\n\ndef ensure_rng(rng=None):\n    \"\"\"Coerces input into a random number generator.\n\n    If the input is None, then a global random state is returned.\n\n    If the input is a numeric value, then that is used as a seed to construct a\n    random state. Otherwise the input is returned as-is.\n\n    Adapted from [1]_.\n\n    Args:\n        rng (int | numpy.random.RandomState | None):\n            if None, then defaults to the global rng. Otherwise this can be an\n            integer or a RandomState class\n    Returns:\n        (numpy.random.RandomState) : rng -\n            a numpy random number generator\n\n    References:\n        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501\n    \"\"\"\n\n    if rng is None:\n        rng = np.random.mtrand._rand\n    elif isinstance(rng, int):\n        rng = np.random.RandomState(rng)\n    else:\n        rng = rng\n    return rng\n"
  },
  {
    "path": "mmdet/version.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n__version__ = '3.0.0rc6'\nshort_version = __version__\n\n\ndef parse_version_info(version_str):\n    \"\"\"Parse a version string into a tuple.\n\n    Args:\n        version_str (str): The version string.\n    Returns:\n        tuple[int | str]: The version info, e.g., \"1.3.0\" is parsed into\n            (1, 3, 0), and \"2.0.0rc1\" is parsed into (2, 0, 0, 'rc1').\n    \"\"\"\n    version_info = []\n    for x in version_str.split('.'):\n        if x.isdigit():\n            version_info.append(int(x))\n        elif x.find('rc') != -1:\n            patch_version = x.split('rc')\n            version_info.append(int(patch_version[0]))\n            version_info.append(f'rc{patch_version[1]}')\n    return tuple(version_info)\n\n\nversion_info = parse_version_info(__version__)\n"
  },
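For reference, a couple of assumed inputs showing how `parse_version_info` turns version strings into comparable tuples:

```python
# parse_version_info keeps release-candidate suffixes as a trailing string.
from mmdet.version import parse_version_info, version_info

assert parse_version_info('3.0.0rc6') == (3, 0, 0, 'rc6')
assert parse_version_info('2.28.2') == (2, 28, 2)
assert version_info == parse_version_info('3.0.0rc6')
```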
  {
    "path": "mmdet/visualization/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .local_visualizer import DetLocalVisualizer\nfrom .palette import get_palette, jitter_color, palette_val\n\n__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color']\n"
  },
  {
    "path": "mmdet/visualization/local_visualizer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmengine.dist import master_only\nfrom mmengine.structures import InstanceData, PixelData\nfrom mmengine.visualization import Visualizer\n\nfrom ..evaluation import INSTANCE_OFFSET\nfrom ..registry import VISUALIZERS\nfrom ..structures import DetDataSample\nfrom ..structures.mask import BitmapMasks, PolygonMasks, bitmap_to_polygon\nfrom .palette import _get_adaptive_scales, get_palette, jitter_color\n\n\n@VISUALIZERS.register_module()\nclass DetLocalVisualizer(Visualizer):\n    \"\"\"MMDetection Local Visualizer.\n\n    Args:\n        name (str): Name of the instance. Defaults to 'visualizer'.\n        image (np.ndarray, optional): the origin image to draw. The format\n            should be RGB. Defaults to None.\n        vis_backends (list, optional): Visual backend config list.\n            Defaults to None.\n        save_dir (str, optional): Save file dir for all storage backends.\n            If it is None, the backend storage will not save any data.\n        bbox_color (str, tuple(int), optional): Color of bbox lines.\n            The tuple of color should be in BGR order. Defaults to None.\n        text_color (str, tuple(int), optional): Color of texts.\n            The tuple of color should be in BGR order.\n            Defaults to (200, 200, 200).\n        mask_color (str, tuple(int), optional): Color of masks.\n            The tuple of color should be in BGR order.\n            Defaults to None.\n        line_width (int, float): The linewidth of lines.\n            Defaults to 3.\n        alpha (int, float): The transparency of bboxes or mask.\n            Defaults to 0.8.\n\n    Examples:\n        >>> import numpy as np\n        >>> import torch\n        >>> from mmengine.structures import InstanceData\n        >>> from mmdet.structures import DetDataSample\n        >>> from mmdet.visualization import DetLocalVisualizer\n\n        >>> det_local_visualizer = DetLocalVisualizer()\n        >>> image = np.random.randint(0, 256,\n        ...                     size=(10, 12, 3)).astype('uint8')\n        >>> gt_instances = InstanceData()\n        >>> gt_instances.bboxes = torch.Tensor([[1, 2, 2, 5]])\n        >>> gt_instances.labels = torch.randint(0, 2, (1,))\n        >>> gt_det_data_sample = DetDataSample()\n        >>> gt_det_data_sample.gt_instances = gt_instances\n        >>> det_local_visualizer.add_datasample('image', image,\n        ...                         gt_det_data_sample)\n        >>> det_local_visualizer.add_datasample(\n        ...                       'image', image, gt_det_data_sample,\n        ...                        out_file='out_file.jpg')\n        >>> det_local_visualizer.add_datasample(\n        ...                        'image', image, gt_det_data_sample,\n        ...                         show=True)\n        >>> pred_instances = InstanceData()\n        >>> pred_instances.bboxes = torch.Tensor([[2, 4, 4, 8]])\n        >>> pred_instances.labels = torch.randint(0, 2, (1,))\n        >>> pred_det_data_sample = DetDataSample()\n        >>> pred_det_data_sample.pred_instances = pred_instances\n        >>> det_local_visualizer.add_datasample('image', image,\n        ...                         gt_det_data_sample,\n        ...                         
pred_det_data_sample)\n    \"\"\"\n\n    def __init__(self,\n                 name: str = 'visualizer',\n                 image: Optional[np.ndarray] = None,\n                 vis_backends: Optional[Dict] = None,\n                 save_dir: Optional[str] = None,\n                 bbox_color: Optional[Union[str, Tuple[int]]] = None,\n                 text_color: Optional[Union[str,\n                                            Tuple[int]]] = (200, 200, 200),\n                 mask_color: Optional[Union[str, Tuple[int]]] = None,\n                 line_width: Union[int, float] = 3,\n                 alpha: float = 0.8) -> None:\n        super().__init__(\n            name=name,\n            image=image,\n            vis_backends=vis_backends,\n            save_dir=save_dir)\n        self.bbox_color = bbox_color\n        self.text_color = text_color\n        self.mask_color = mask_color\n        self.line_width = line_width\n        self.alpha = alpha\n        # Set default value. When calling\n        # `DetLocalVisualizer().dataset_meta=xxx`,\n        # it will override the default value.\n        self.dataset_meta = {}\n\n    def _draw_instances(self, image: np.ndarray, instances: ['InstanceData'],\n                        classes: Optional[List[str]],\n                        palette: Optional[List[tuple]]) -> np.ndarray:\n        \"\"\"Draw instances of GT or prediction.\n\n        Args:\n            image (np.ndarray): The image to draw.\n            instances (:obj:`InstanceData`): Data structure for\n                instance-level annotations or predictions.\n            classes (List[str], optional): Category information.\n            palette (List[tuple], optional): Palette information\n                corresponding to the category.\n\n        Returns:\n            np.ndarray: the drawn image which channel is RGB.\n        \"\"\"\n        self.set_image(image)\n\n        if 'bboxes' in instances:\n            bboxes = instances.bboxes\n            labels = instances.labels\n\n            max_label = int(max(labels) if len(labels) > 0 else 0)\n            text_palette = get_palette(self.text_color, max_label + 1)\n            text_colors = [text_palette[label] for label in labels]\n\n            bbox_color = palette if self.bbox_color is None \\\n                else self.bbox_color\n            bbox_palette = get_palette(bbox_color, max_label + 1)\n            colors = [bbox_palette[label] for label in labels]\n            self.draw_bboxes(\n                bboxes,\n                edge_colors=colors,\n                alpha=self.alpha,\n                line_widths=self.line_width)\n\n            positions = bboxes[:, :2] + self.line_width\n            areas = (bboxes[:, 3] - bboxes[:, 1]) * (\n                bboxes[:, 2] - bboxes[:, 0])\n            scales = _get_adaptive_scales(areas)\n\n            for i, (pos, label) in enumerate(zip(positions, labels)):\n                label_text = classes[\n                    label] if classes is not None else f'class {label}'\n                if 'scores' in instances:\n                    score = round(float(instances.scores[i]) * 100, 1)\n                    label_text += f': {score}'\n\n                self.draw_texts(\n                    label_text,\n                    pos,\n                    colors=text_colors[i],\n                    font_sizes=int(13 * scales[i]),\n                    bboxes=[{\n                        'facecolor': 'black',\n                        'alpha': 0.8,\n                        'pad': 0.7,\n                 
       'edgecolor': 'none'\n                    }])\n\n        if 'masks' in instances:\n            labels = instances.labels\n            masks = instances.masks\n            if isinstance(masks, torch.Tensor):\n                masks = masks.numpy()\n            elif isinstance(masks, (PolygonMasks, BitmapMasks)):\n                masks = masks.to_ndarray()\n\n            masks = masks.astype(bool)\n\n            max_label = int(max(labels) if len(labels) > 0 else 0)\n            mask_color = palette if self.mask_color is None \\\n                else self.mask_color\n            mask_palette = get_palette(mask_color, max_label + 1)\n            colors = [jitter_color(mask_palette[label]) for label in labels]\n            text_palette = get_palette(self.text_color, max_label + 1)\n            text_colors = [text_palette[label] for label in labels]\n\n            polygons = []\n            for i, mask in enumerate(masks):\n                contours, _ = bitmap_to_polygon(mask)\n                polygons.extend(contours)\n            self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)\n            self.draw_binary_masks(masks, colors=colors, alphas=self.alpha)\n\n            if len(labels) > 0 and \\\n                    ('bboxes' not in instances or\n                     instances.bboxes.sum() == 0):\n                # instances.bboxes.sum()==0 represent dummy bboxes.\n                # A typical example of SOLO does not exist bbox branch.\n                areas = []\n                positions = []\n                for mask in masks:\n                    _, _, stats, centroids = cv2.connectedComponentsWithStats(\n                        mask.astype(np.uint8), connectivity=8)\n                    if stats.shape[0] > 1:\n                        largest_id = np.argmax(stats[1:, -1]) + 1\n                        positions.append(centroids[largest_id])\n                        areas.append(stats[largest_id, -1])\n                areas = np.stack(areas, axis=0)\n                scales = _get_adaptive_scales(areas)\n\n                for i, (pos, label) in enumerate(zip(positions, labels)):\n                    label_text = classes[\n                        label] if classes is not None else f'class {label}'\n                    if 'scores' in instances:\n                        score = round(float(instances.scores[i]) * 100, 1)\n                        label_text += f': {score}'\n\n                    self.draw_texts(\n                        label_text,\n                        pos,\n                        colors=text_colors[i],\n                        font_sizes=int(13 * scales[i]),\n                        horizontal_alignments='center',\n                        bboxes=[{\n                            'facecolor': 'black',\n                            'alpha': 0.8,\n                            'pad': 0.7,\n                            'edgecolor': 'none'\n                        }])\n        return self.get_image()\n\n    def _draw_panoptic_seg(self, image: np.ndarray,\n                           panoptic_seg: ['PixelData'],\n                           classes: Optional[List[str]]) -> np.ndarray:\n        \"\"\"Draw panoptic seg of GT or prediction.\n\n        Args:\n            image (np.ndarray): The image to draw.\n            panoptic_seg (:obj:`PixelData`): Data structure for\n                pixel-level annotations or predictions.\n            classes (List[str], optional): Category information.\n\n        Returns:\n            np.ndarray: the drawn image which channel is 
RGB.\n        \"\"\"\n        # TODO: Is there a way to bypass？\n        num_classes = len(classes)\n\n        panoptic_seg = panoptic_seg.sem_seg[0]\n        ids = np.unique(panoptic_seg)[::-1]\n        legal_indices = ids != num_classes  # for VOID label\n        ids = ids[legal_indices]\n\n        labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n        segms = (panoptic_seg[None] == ids[:, None, None])\n\n        max_label = int(max(labels) if len(labels) > 0 else 0)\n        mask_palette = get_palette(self.mask_color, max_label + 1)\n        colors = [mask_palette[label] for label in labels]\n\n        self.set_image(image)\n\n        # draw segm\n        polygons = []\n        for i, mask in enumerate(segms):\n            contours, _ = bitmap_to_polygon(mask)\n            polygons.extend(contours)\n        self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)\n        self.draw_binary_masks(segms, colors=colors, alphas=self.alpha)\n\n        # draw label\n        areas = []\n        positions = []\n        for mask in segms:\n            _, _, stats, centroids = cv2.connectedComponentsWithStats(\n                mask.astype(np.uint8), connectivity=8)\n            max_id = np.argmax(stats[1:, -1]) + 1\n            positions.append(centroids[max_id])\n            areas.append(stats[max_id, -1])\n        areas = np.stack(areas, axis=0)\n        scales = _get_adaptive_scales(areas)\n\n        text_palette = get_palette(self.text_color, max_label + 1)\n        text_colors = [text_palette[label] for label in labels]\n\n        for i, (pos, label) in enumerate(zip(positions, labels)):\n            label_text = classes[label]\n\n            self.draw_texts(\n                label_text,\n                pos,\n                colors=text_colors[i],\n                font_sizes=int(13 * scales[i]),\n                bboxes=[{\n                    'facecolor': 'black',\n                    'alpha': 0.8,\n                    'pad': 0.7,\n                    'edgecolor': 'none'\n                }],\n                horizontal_alignments='center')\n        return self.get_image()\n\n    @master_only\n    def add_datasample(\n            self,\n            name: str,\n            image: np.ndarray,\n            data_sample: Optional['DetDataSample'] = None,\n            draw_gt: bool = True,\n            draw_pred: bool = True,\n            show: bool = False,\n            wait_time: float = 0,\n            # TODO: Supported in mmengine's Viusalizer.\n            out_file: Optional[str] = None,\n            pred_score_thr: float = 0.3,\n            step: int = 0) -> None:\n        \"\"\"Draw datasample and save to all backends.\n\n        - If GT and prediction are plotted at the same time, they are\n        displayed in a stitched image where the left image is the\n        ground truth and the right image is the prediction.\n        - If ``show`` is True, all storage backends are ignored, and\n        the images will be displayed in a local window.\n        - If ``out_file`` is specified, the drawn image will be\n        saved to ``out_file``. t is usually used when the display\n        is not available.\n\n        Args:\n            name (str): The image identifier.\n            image (np.ndarray): The image to draw.\n            data_sample (:obj:`DetDataSample`, optional): A data\n                sample that contain annotations and predictions.\n                Defaults to None.\n            draw_gt (bool): Whether to draw GT DetDataSample. 
Default to True.\n            draw_pred (bool): Whether to draw Prediction DetDataSample.\n                Defaults to True.\n            show (bool): Whether to display the drawn image. Default to False.\n            wait_time (float): The interval of show (s). Defaults to 0.\n            out_file (str): Path to output file. Defaults to None.\n            pred_score_thr (float): The threshold to visualize the bboxes\n                and masks. Defaults to 0.3.\n            step (int): Global step value to record. Defaults to 0.\n        \"\"\"\n        image = image.clip(0, 255).astype(np.uint8)\n        classes = self.dataset_meta.get('classes', None)\n        palette = self.dataset_meta.get('palette', None)\n\n        gt_img_data = None\n        pred_img_data = None\n\n        if data_sample is not None:\n            data_sample = data_sample.cpu()\n\n        if draw_gt and data_sample is not None:\n            gt_img_data = image\n            if 'gt_instances' in data_sample:\n                gt_img_data = self._draw_instances(image,\n                                                   data_sample.gt_instances,\n                                                   classes, palette)\n\n            if 'gt_panoptic_seg' in data_sample:\n                assert classes is not None, 'class information is ' \\\n                                            'not provided when ' \\\n                                            'visualizing panoptic ' \\\n                                            'segmentation results.'\n                gt_img_data = self._draw_panoptic_seg(\n                    gt_img_data, data_sample.gt_panoptic_seg, classes)\n\n        if draw_pred and data_sample is not None:\n            pred_img_data = image\n            if 'pred_instances' in data_sample:\n                pred_instances = data_sample.pred_instances\n                pred_instances = pred_instances[\n                    pred_instances.scores > pred_score_thr]\n                pred_img_data = self._draw_instances(image, pred_instances,\n                                                     classes, palette)\n            if 'pred_panoptic_seg' in data_sample:\n                assert classes is not None, 'class information is ' \\\n                                            'not provided when ' \\\n                                            'visualizing panoptic ' \\\n                                            'segmentation results.'\n                pred_img_data = self._draw_panoptic_seg(\n                    pred_img_data, data_sample.pred_panoptic_seg.numpy(),\n                    classes)\n\n        if gt_img_data is not None and pred_img_data is not None:\n            drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1)\n        elif gt_img_data is not None:\n            drawn_img = gt_img_data\n        elif pred_img_data is not None:\n            drawn_img = pred_img_data\n        else:\n            # Display the original image directly if nothing is drawn.\n            drawn_img = image\n\n        # It is convenient for users to obtain the drawn image.\n        # For example, the user wants to obtain the drawn image and\n        # save it as a video during video inference.\n        self.set_image(drawn_img)\n\n        if show:\n            self.show(drawn_img, win_name=name, wait_time=wait_time)\n\n        if out_file is not None:\n            mmcv.imwrite(drawn_img[..., ::-1], out_file)\n        else:\n            self.add_image(name, drawn_img, step)\n"
  },
  {
    "path": "mmdet/visualization/palette.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Tuple, Union\n\nimport mmcv\nimport numpy as np\nfrom mmengine.utils import is_str\n\n\ndef palette_val(palette: List[tuple]) -> List[tuple]:\n    \"\"\"Convert palette to matplotlib palette.\n\n    Args:\n        palette (List[tuple]): A list of color tuples.\n\n    Returns:\n        List[tuple[float]]: A list of RGB matplotlib color tuples.\n    \"\"\"\n    new_palette = []\n    for color in palette:\n        color = [c / 255 for c in color]\n        new_palette.append(tuple(color))\n    return new_palette\n\n\ndef get_palette(palette: Union[List[tuple], str, tuple],\n                num_classes: int) -> List[Tuple[int]]:\n    \"\"\"Get palette from various inputs.\n\n    Args:\n        palette (list[tuple] | str | tuple): palette inputs.\n        num_classes (int): the number of classes.\n\n    Returns:\n        list[tuple[int]]: A list of color tuples.\n    \"\"\"\n    assert isinstance(num_classes, int)\n\n    if isinstance(palette, list):\n        dataset_palette = palette\n    elif isinstance(palette, tuple):\n        dataset_palette = [palette] * num_classes\n    elif palette == 'random' or palette is None:\n        state = np.random.get_state()\n        # random color\n        np.random.seed(42)\n        palette = np.random.randint(0, 256, size=(num_classes, 3))\n        np.random.set_state(state)\n        dataset_palette = [tuple(c) for c in palette]\n    elif palette == 'coco':\n        from mmdet.datasets import CocoDataset, CocoPanopticDataset\n        dataset_palette = CocoDataset.METAINFO['palette']\n        if len(dataset_palette) < num_classes:\n            dataset_palette = CocoPanopticDataset.METAINFO['palette']\n    elif palette == 'citys':\n        from mmdet.datasets import CityscapesDataset\n        dataset_palette = CityscapesDataset.METAINFO['palette']\n    elif palette == 'voc':\n        from mmdet.datasets import VOCDataset\n        dataset_palette = VOCDataset.METAINFO['palette']\n    elif is_str(palette):\n        dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes\n    else:\n        raise TypeError(f'Invalid type for palette: {type(palette)}')\n\n    assert len(dataset_palette) >= num_classes, \\\n        'The length of palette should not be less than `num_classes`.'\n    return dataset_palette\n\n\ndef _get_adaptive_scales(areas: np.ndarray,\n                         min_area: int = 800,\n                         max_area: int = 30000) -> np.ndarray:\n    \"\"\"Get adaptive scales according to areas.\n\n    The scale range is [0.5, 1.0]. When the area is less than\n    ``min_area``, the scale is 0.5 while the area is larger than\n    ``max_area``, the scale is 1.0.\n\n    Args:\n        areas (ndarray): The areas of bboxes or masks with the\n            shape of (n, ).\n        min_area (int): Lower bound areas for adaptive scales.\n            Defaults to 800.\n        max_area (int): Upper bound areas for adaptive scales.\n            Defaults to 30000.\n\n    Returns:\n        ndarray: The adaotive scales with the shape of (n, ).\n    \"\"\"\n    scales = 0.5 + (areas - min_area) / (max_area - min_area)\n    scales = np.clip(scales, 0.5, 1.0)\n    return scales\n\n\ndef jitter_color(color: tuple) -> tuple:\n    \"\"\"Randomly jitter the given color in order to better distinguish instances\n    with the same class.\n\n    Args:\n        color (tuple): The RGB color tuple. 
Each value is between [0, 255].\n\n    Returns:\n        tuple: The jittered color tuple.\n    \"\"\"\n    jitter = np.random.rand(3)\n    jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255\n    color = np.clip(jitter + color, 0, 255).astype(np.uint8)\n    return tuple(color)\n"
  },
  {
    "path": "model-index.yml",
    "content": "Import:\n  - configs/atss/metafile.yml\n  - configs/autoassign/metafile.yml\n  - configs/carafe/metafile.yml\n  - configs/cascade_rcnn/metafile.yml\n  - configs/cascade_rpn/metafile.yml\n  - configs/centernet/metafile.yml\n  - configs/centripetalnet/metafile.yml\n  - configs/cornernet/metafile.yml\n  - configs/condinst/metafile.yml\n  - configs/convnext/metafile.yml\n  - configs/dcn/metafile.yml\n  - configs/dcnv2/metafile.yml\n  - configs/ddod/metafile.yml\n  - configs/deformable_detr/metafile.yml\n  - configs/detectors/metafile.yml\n  - configs/detr/metafile.yml\n  - configs/double_heads/metafile.yml\n  - configs/dyhead/metafile.yml\n  - configs/dynamic_rcnn/metafile.yml\n  - configs/efficientnet/metafile.yml\n  - configs/empirical_attention/metafile.yml\n  - configs/faster_rcnn/metafile.yml\n  - configs/fcos/metafile.yml\n  - configs/foveabox/metafile.yml\n  - configs/fpg/metafile.yml\n  - configs/free_anchor/metafile.yml\n  - configs/fsaf/metafile.yml\n  - configs/gcnet/metafile.yml\n  - configs/gfl/metafile.yml\n  - configs/ghm/metafile.yml\n  - configs/gn/metafile.yml\n  - configs/gn+ws/metafile.yml\n  - configs/grid_rcnn/metafile.yml\n  - configs/groie/metafile.yml\n  - configs/guided_anchoring/metafile.yml\n  - configs/hrnet/metafile.yml\n  - configs/htc/metafile.yml\n  - configs/instaboost/metafile.yml\n  - configs/lad/metafile.yml\n  - configs/ld/metafile.yml\n  - configs/libra_rcnn/metafile.yml\n  - configs/mask2former/metafile.yml\n  - configs/mask_rcnn/metafile.yml\n  - configs/maskformer/metafile.yml\n  - configs/ms_rcnn/metafile.yml\n  - configs/nas_fcos/metafile.yml\n  - configs/nas_fpn/metafile.yml\n  - configs/openimages/metafile.yml\n  - configs/paa/metafile.yml\n  - configs/pafpn/metafile.yml\n  - configs/panoptic_fpn/metafile.yml\n  - configs/pvt/metafile.yml\n  - configs/pisa/metafile.yml\n  - configs/point_rend/metafile.yml\n  - configs/queryinst/metafile.yml\n  - configs/rtmdet/metafile.yml\n  - configs/regnet/metafile.yml\n  - configs/reppoints/metafile.yml\n  - configs/res2net/metafile.yml\n  - configs/resnest/metafile.yml\n  - configs/resnet_strikes_back/metafile.yml\n  - configs/retinanet/metafile.yml\n  - configs/rtmdet/metafile.yml\n  - configs/sabl/metafile.yml\n  - configs/scnet/metafile.yml\n  - configs/scratch/metafile.yml\n  - configs/seesaw_loss/metafile.yml\n  - configs/simple_copy_paste/metafile.yml\n  - configs/sparse_rcnn/metafile.yml\n  - configs/solo/metafile.yml\n  - configs/solov2/metafile.yml\n  - configs/ssd/metafile.yml\n  - configs/swin/metafile.yml\n  - configs/tridentnet/metafile.yml\n  - configs/tood/metafile.yml\n  - configs/vfnet/metafile.yml\n  - configs/yolact/metafile.yml\n  - configs/yolo/metafile.yml\n  - configs/yolof/metafile.yml\n  - configs/yolox/metafile.yml\n"
  },
  {
    "path": "projects/ConvNeXt-V2/README.md",
    "content": "# ConvNeXt-V2\n\n> [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)\n\n## Abstract\n\nDriven by improved architectures and better representation learning frameworks, the field of visual recognition has enjoyed rapid modernization and performance boost in the early 2020s. For example, modern ConvNets, represented by ConvNeXt \\[52\\], have demonstrated strong performance in various scenarios. While these models were originally designed for supervised learning with ImageNet labels, they can also potentially benefit from self-supervised learning techniques such as masked autoencoders (MAE) . However, we found that simply combining these two approaches leads to subpar performance. In this paper, we propose a fully convolutional masked autoencoder framework and a new Global Response Normalization (GRN) layer that can be added to the ConvNeXt architecture to enhance inter-channel feature competition. This co-design of self-supervised learning techniques and architectural improvement results in a new model family called ConvNeXt V2, which significantly improves the performance of pure ConvNets on various recognition benchmarks, including ImageNet classification, COCO detection, and ADE20K segmentation. We also provide pre-trained ConvNeXt V2 models of various sizes, ranging from an efficient 3.7Mparameter Atto model with 76.7% top-1 accuracy on Im-ageNet, to a 650M Huge model that achieves a state-of-theart 88.9% accuracy using only public training data.\n\n<div align=center>\n<img src=\"https://user-images.githubusercontent.com/12907710/212588579-02d621d8-5796-4f0d-b4d2-758fe9c2f395.png\" width=\"50%\"/>\n</div>\n\n## Results and models\n\n|   Method   |   Backbone    | Pretrain | Lr schd | Augmentation | Mem (GB) | box AP | mask AP |                            Config                            |                                                                                                                                                                                        Download                                                                                                                                                                                         |\n| :--------: | :-----------: | :------: | :-----: | :----------: | :------: | :----: | :-----: | :----------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n| Mask R-CNN | ConvNeXt-V2-B |  FCMAE   |   3x    |     LSJ      |   22.5   |  52.9  |  46.4   | [config](./mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/convnextv2/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco_20230113_110947-757ee2dd.pth)  \\| [log](https://download.openmmlab.com/mmdetection/v3.0/convnextv2/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco_20230113_110947.log.json) |\n\n**Note**:\n\n- This is a pre-release version of ConvNeXt-V2 object detection. 
The official finetuning setting of ConvNeXt-V2 has not been released yet.\n- ConvNeXt backbone needs to install [MMClassification dev-1.x branch](https://github.com/open-mmlab/mmclassification/tree/dev-1.x) first, which has abundant backbones for downstream tasks.\n\n```shell\ngit clone -b dev-1.x https://github.com/open-mmlab/mmclassification.git\ncd mmclassification\npip install -U openmim && mim install -e .\n```\n\n## Citation\n\n```bibtex\n@article{Woo2023ConvNeXtV2,\n  title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders},\n  author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie},\n  year={2023},\n  journal={arXiv preprint arXiv:2301.00808},\n}\n```\n"
  },
  {
    "path": "projects/ConvNeXt-V2/configs/mask-rcnn_convnext-v2-b_fpn_lsj-3x-fcmae_coco.py",
    "content": "_base_ = [\n    'mmdet::_base_/models/mask-rcnn_r50_fpn.py',\n    'mmdet::_base_/datasets/coco_instance.py',\n    'mmdet::_base_/schedules/schedule_1x.py',\n    'mmdet::_base_/default_runtime.py'\n]\n\n# please install the mmclassification dev-1.x branch\n# import mmcls.models to trigger register_module in mmcls\ncustom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)\ncheckpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth'  # noqa\nimage_size = (1024, 1024)\n\nmodel = dict(\n    backbone=dict(\n        _delete_=True,\n        type='mmcls.ConvNeXt',\n        arch='base',\n        out_indices=[0, 1, 2, 3],\n        # TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}\n        drop_path_rate=0.4,\n        layer_scale_init_value=0.,  # disable layer scale when using GRN\n        gap_before_final_norm=False,\n        use_grn=True,  # V2 uses GRN\n        init_cfg=dict(\n            type='Pretrained', checkpoint=checkpoint_file,\n            prefix='backbone.')),\n    neck=dict(in_channels=[128, 256, 512, 1024]),\n    test_cfg=dict(\n        rpn=dict(nms=dict(type='nms')),  # TODO: does RPN use soft_nms?\n        rcnn=dict(nms=dict(type='soft_nms'))))\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(\n        type='RandomResize',\n        scale=image_size,\n        ratio_range=(0.1, 2.0),\n        keep_ratio=True),\n    dict(\n        type='RandomCrop',\n        crop_type='absolute_range',\n        crop_size=image_size,\n        recompute_bbox=True,\n        allow_negative_crop=True),\n    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntrain_dataloader = dict(\n    batch_size=4,  # total_batch_size 32 = 8 GPUS x 4 images\n    num_workers=8,\n    dataset=dict(pipeline=train_pipeline))\n\nmax_epochs = 36\ntrain_cfg = dict(max_epochs=max_epochs)\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,\n        end=1000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=max_epochs,\n        by_epoch=True,\n        milestones=[27, 33],\n        gamma=0.1)\n]\n\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    constructor='LearningRateDecayOptimizerConstructor',\n    paramwise_cfg={\n        'decay_rate': 0.95,\n        'decay_type': 'layer_wise',  # TODO: sweep layer-wise lr decay?\n        'num_layers': 12\n    },\n    optimizer=dict(\n        _delete_=True,\n        type='AdamW',\n        lr=0.0001,\n        betas=(0.9, 0.999),\n        weight_decay=0.05,\n    ))\n\ndefault_hooks = dict(checkpoint=dict(max_keep_ckpts=1))\n"
  },
  {
    "path": "projects/Detic/README.md",
    "content": "# Detecting Twenty-thousand Classes using Image-level Supervision\n\n## Description\n\n**Detic**: A **Det**ector with **i**mage **c**lasses that can use image-level labels to easily train detectors.\n\n<p align=\"center\"> <img src='https://github.com/facebookresearch/Detic/blob/main/docs/teaser.jpeg?raw=true' align=\"center\" height=\"300px\"> </p>\n\n> [**Detecting Twenty-thousand Classes using Image-level Supervision**](http://arxiv.org/abs/2201.02605),\n> Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krähenbühl, Ishan Misra,\n> *ECCV 2022 ([arXiv 2201.02605](http://arxiv.org/abs/2201.02605))*\n\n## Usage\n\n<!-- For a typical model, this section should contain the commands for training and testing. You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. -->\n\n## Installation\n\nDetic requires to install CLIP.\n\n```shell\npip install git+https://github.com/openai/CLIP.git\n```\n\n### Demo\n\n#### Inference with existing dataset vocabulary embeddings\n\nFirst, go to the Detic project folder.\n\n```shell\ncd projects/Detic\n```\n\nThen, download the pre-computed CLIP embeddings from [dataset metainfo](https://github.com/facebookresearch/Detic/tree/main/datasets/metadata) to the `datasets/metadata` folder.\nThe CLIP embeddings will be loaded to the zero-shot classifier during inference.\nFor example, you can download LVIS's class name embeddings with the following command:\n\n```shell\nwget -P datasets/metadata https://raw.githubusercontent.com/facebookresearch/Detic/main/datasets/metadata/lvis_v1_clip_a%2Bcname.npy\n```\n\nYou can run demo like this:\n\n```shell\npython demo.py \\\n  ${IMAGE_PATH} \\\n  ${CONFIG_PATH} \\\n  ${MODEL_PATH} \\\n  --show \\\n  --score-thr 0.5 \\\n  --dataset lvis\n```\n\n![image](https://user-images.githubusercontent.com/12907710/213624759-f0a2ba0c-0f5c-4424-a350-5ba5349e5842.png)\n\n### Inference with custom vocabularies\n\n- Detic can detects any class given class names by using CLIP.\n\nYou can detect custom classes with `--class-name` command:\n\n```\npython demo.py \\\n  ${IMAGE_PATH} \\\n  ${CONFIG_PATH} \\\n  ${MODEL_PATH} \\\n  --show \\\n  --score-thr 0.3 \\\n  --class-name headphone webcam paper coffe\n```\n\n![image](https://user-images.githubusercontent.com/12907710/213624637-e9e8a313-9821-4782-a18a-4408c876852b.png)\n\nNote that `headphone`, `paper` and `coffe` (typo intended) are not LVIS classes. Despite the misspelled class name, Detic can produce a reasonable detection for `coffe`.\n\n## Results\n\nHere we only provide the Detic Swin-B model for the open vocabulary demo. 
Multi-dataset training and open-vocabulary testing will be supported in the future.\n\nTo find more variants, please visit the [official model zoo](https://github.com/facebookresearch/Detic/blob/main/docs/MODEL_ZOO.md).\n\n| Backbone |       Training data        |                                Config                                 |                                                                                      Download                                                                                      |\n| :------: | :------------------------: | :-------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|  Swin-B  | ImageNet-21K & LVIS & COCO | [config](./configs/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/detic/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k_20230120-0d301978.pth) |\n\n## Citation\n\nIf you find Detic is useful in your research or applications, please consider giving a star 🌟 to the [official repository](https://github.com/facebookresearch/Detic) and citing Detic by the following BibTeX entry.\n\n```BibTeX\n@inproceedings{zhou2022detecting,\n  title={Detecting Twenty-thousand Classes using Image-level Supervision},\n  author={Zhou, Xingyi and Girdhar, Rohit and Joulin, Armand and Kr{\\\"a}henb{\\\"u}hl, Philipp and Misra, Ishan},\n  booktitle={ECCV},\n  year={2022}\n}\n\n```\n\n## Checklist\n\n<!-- Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.\nOpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone.\nNote that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.\nA project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->\n\n- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.\n\n  - [x] Finish the code\n\n    <!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmdet.registry.MODELS` and configurable via a config file. -->\n\n  - [x] Basic docstrings & proper citation\n\n    <!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. 
[A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->\n\n  - [x] Test-time correctness\n\n    <!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->\n\n  - [x] A full README\n\n    <!-- As this template does. -->\n\n- [ ] Milestone 2: Indicates a successful model implementation.\n\n  - [ ] Training-time correctness\n\n    <!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->\n\n- [ ] Milestone 3: Good to be a part of our core package!\n\n  - [ ] Type hints and docstrings\n\n    <!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/mmdet/datasets/transforms/transforms.py#L41-L169) -->\n\n  - [ ] Unit tests\n\n    <!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/tests/test_datasets/test_transforms/test_transforms.py#L35-L88) -->\n\n  - [ ] Code polishing\n\n    <!-- Refactor your code according to reviewer's comment. -->\n\n  - [ ] Metafile.yml\n\n    <!-- It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/metafile.yml) -->\n\n- [ ] Move your modules into the core package following the codebase's file hierarchy structure.\n\n  <!-- In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md) -->\n\n- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.\n"
  },
  {
    "path": "projects/Detic/configs/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k.py",
    "content": "_base_ = 'mmdet::common/lsj-200e_coco-detection.py'\n\ncustom_imports = dict(\n    imports=['projects.Detic.detic'], allow_failed_imports=False)\n\nimage_size = (1024, 1024)\nbatch_augments = [dict(type='BatchFixedSizePad', size=image_size)]\n\ncls_layer = dict(\n    type='ZeroShotClassifier',\n    zs_weight_path='rand',\n    zs_weight_dim=512,\n    use_bias=0.0,\n    norm_weight=True,\n    norm_temperature=50.0)\nreg_layer = [\n    dict(type='Linear', in_features=1024, out_features=1024),\n    dict(type='ReLU', inplace=True),\n    dict(type='Linear', in_features=1024, out_features=4)\n]\n\nnum_classes = 22047\n\nmodel = dict(\n    type='CascadeRCNN',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32,\n        batch_augments=batch_augments),\n    backbone=dict(\n        type='SwinTransformer',\n        embed_dims=128,\n        depths=[2, 2, 18, 2],\n        num_heads=[4, 8, 16, 32],\n        window_size=7,\n        mlp_ratio=4,\n        qkv_bias=True,\n        qk_scale=None,\n        drop_rate=0.,\n        attn_drop_rate=0.,\n        drop_path_rate=0.3,\n        patch_norm=True,\n        out_indices=(1, 2, 3),\n        with_cp=False),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024],\n        out_channels=256,\n        start_level=0,\n        add_extra_convs='on_output',\n        num_outs=5,\n        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),\n        relu_before_extra_convs=True),\n    rpn_head=dict(\n        type='CenterNetRPNHead',\n        num_classes=1,\n        in_channels=256,\n        stacked_convs=4,\n        feat_channels=256,\n        strides=[8, 16, 32, 64, 128],\n        conv_bias=True,\n        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n        loss_cls=dict(\n            type='GaussianFocalLoss',\n            pos_weight=0.25,\n            neg_weight=0.75,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n    ),\n    roi_head=dict(\n        type='DeticRoIHead',\n        num_stages=3,\n        stage_loss_weights=[1, 0.5, 0.25],\n        bbox_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(\n                type='RoIAlign',\n                output_size=7,\n                sampling_ratio=0,\n                use_torchvision=True),\n            out_channels=256,\n            featmap_strides=[8, 16, 32],\n            # approximately equal to\n            # canonical_box_size=224, canonical_level=4 in D2\n            finest_scale=112),\n        bbox_head=[\n            dict(\n                type='DeticBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=num_classes,\n                cls_predictor_cfg=cls_layer,\n                reg_predictor_cfg=reg_layer,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.1, 0.1, 0.2, 0.2]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss', use_sigmoid=True,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='DeticBBoxHead',\n         
       in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=num_classes,\n                cls_predictor_cfg=cls_layer,\n                reg_predictor_cfg=reg_layer,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.05, 0.05, 0.1, 0.1]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss', use_sigmoid=True,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                               loss_weight=1.0)),\n            dict(\n                type='DeticBBoxHead',\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=num_classes,\n                cls_predictor_cfg=cls_layer,\n                reg_predictor_cfg=reg_layer,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.033, 0.033, 0.067, 0.067]),\n                reg_class_agnostic=True,\n                loss_cls=dict(\n                    type='CrossEntropyLoss', use_sigmoid=True,\n                    loss_weight=1.0),\n                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n        ],\n        mask_roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),\n            out_channels=256,\n            featmap_strides=[8, 16, 32],\n            # approximately equal to\n            # canonical_box_size=224, canonical_level=4 in D2\n            finest_scale=112),\n        mask_head=dict(\n            type='FCNMaskHead',\n            num_convs=4,\n            in_channels=256,\n            conv_out_channels=256,\n            class_agnostic=True,\n            num_classes=num_classes,\n            loss_mask=dict(\n                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),\n    # model training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=0,\n            pos_weight=-1,\n            debug=False),\n        rpn_proposal=dict(\n            nms_pre=2000,\n            max_per_img=2000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0),\n        rcnn=[\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.6,\n                    neg_iou_thr=0.6,\n                    min_pos_iou=0.6,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                
pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.7,\n                    min_pos_iou=0.7,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.8,\n                    neg_iou_thr=0.8,\n                    min_pos_iou=0.8,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=512,\n                    pos_fraction=0.25,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=True),\n                mask_size=28,\n                pos_weight=-1,\n                debug=False)\n        ]),\n    test_cfg=dict(\n        rpn=dict(\n            score_thr=0.0001,\n            nms_pre=1000,\n            max_per_img=256,\n            nms=dict(type='nms', iou_threshold=0.9),\n            min_bbox_size=0),\n        rcnn=dict(\n            score_thr=0.02,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=300,\n            mask_thr_binary=0.5)))\n\nbackend = 'pillow'\ntest_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args=_base_.file_client_args,\n        imdecode_backend=backend),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),\n    dict(\n        type='LoadAnnotations',\n        with_bbox=True,\n        with_mask=True,\n        poly2mask=False),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(batch_size=8, num_workers=4)\nval_dataloader = dict(dataset=dict(pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n# Enable automatic-mixed-precision training with AmpOptimWrapper.\noptim_wrapper = dict(\n    type='AmpOptimWrapper',\n    optimizer=dict(\n        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),\n    paramwise_cfg=dict(norm_decay_mult=0.))\n\nparam_scheduler = [\n    dict(\n        type='LinearLR',\n        start_factor=0.00025,\n        by_epoch=False,\n        begin=0,\n        end=4000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=25,\n        by_epoch=True,\n        milestones=[22, 24],\n        gamma=0.1)\n]\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64)\n"
  },
  {
    "path": "projects/Detic/demo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport urllib\nfrom argparse import ArgumentParser\n\nimport mmcv\nimport torch\nfrom mmengine.logging import print_log\nfrom mmengine.utils import ProgressBar, scandir\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.registry import VISUALIZERS\nfrom mmdet.utils import register_all_modules\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',\n                  '.tiff', '.webp')\n\n\ndef get_file_list(source_root: str) -> [list, dict]:\n    \"\"\"Get file list.\n\n    Args:\n        source_root (str): image or video source path\n\n    Return:\n        source_file_path_list (list): A list for all source file.\n        source_type (dict): Source type: file or url or dir.\n    \"\"\"\n    is_dir = os.path.isdir(source_root)\n    is_url = source_root.startswith(('http:/', 'https:/'))\n    is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS\n\n    source_file_path_list = []\n    if is_dir:\n        # when input source is dir\n        for file in scandir(source_root, IMG_EXTENSIONS, recursive=True):\n            source_file_path_list.append(os.path.join(source_root, file))\n    elif is_url:\n        # when input source is url\n        filename = os.path.basename(\n            urllib.parse.unquote(source_root).split('?')[0])\n        file_save_path = os.path.join(os.getcwd(), filename)\n        print(f'Downloading source file to {file_save_path}')\n        torch.hub.download_url_to_file(source_root, file_save_path)\n        source_file_path_list = [file_save_path]\n    elif is_file:\n        # when input source is single image\n        source_file_path_list = [source_root]\n    else:\n        print('Cannot find image file.')\n\n    source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)\n\n    return source_file_path_list, source_type\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument(\n        'img', help='Image path, include image file, dir and URL.')\n    parser.add_argument('config', help='Config file')\n    parser.add_argument('checkpoint', help='Checkpoint file')\n    parser.add_argument(\n        '--out-dir', default='./output', help='Path to output file')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--show', action='store_true', help='Show the detection results')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.3, help='Bbox score threshold')\n    parser.add_argument(\n        '--dataset', type=str, help='dataset name to load the text embedding')\n    parser.add_argument(\n        '--class-name', nargs='+', type=str, help='custom class names')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # register all modules in mmdet into the registries\n    register_all_modules()\n\n    # build the model from a config file and a checkpoint file\n    model = init_detector(args.config, args.checkpoint, device=args.device)\n\n    if not os.path.exists(args.out_dir) and not args.show:\n        os.mkdir(args.out_dir)\n\n    # init visualizer\n    visualizer = VISUALIZERS.build(model.cfg.visualizer)\n    visualizer.dataset_meta = model.dataset_meta\n\n    # get file list\n    files, source_type = get_file_list(args.img)\n    from detic.utils import (get_class_names, get_text_embeddings,\n                             reset_cls_layer_weight)\n\n    # class name 
embeddings\n    if args.class_name:\n        dataset_classes = args.class_name\n    elif args.dataset:\n        dataset_classes = get_class_names(args.dataset)\n    else:\n        raise RuntimeError('Please specify either `--dataset` or '\n                           '`--class-name` to build the text embeddings.')\n    embedding = get_text_embeddings(\n        dataset=args.dataset, custom_vocabulary=args.class_name)\n    visualizer.dataset_meta['classes'] = dataset_classes\n    reset_cls_layer_weight(model, embedding)\n\n    # start detector inference\n    progress_bar = ProgressBar(len(files))\n    for file in files:\n        result = inference_detector(model, file)\n\n        img = mmcv.imread(file)\n        img = mmcv.imconvert(img, 'bgr', 'rgb')\n\n        if source_type['is_dir']:\n            filename = os.path.relpath(file, args.img).replace('/', '_')\n        else:\n            filename = os.path.basename(file)\n        out_file = None if args.show else os.path.join(args.out_dir, filename)\n\n        progress_bar.update()\n\n        visualizer.add_datasample(\n            filename,\n            img,\n            data_sample=result,\n            draw_gt=False,\n            show=args.show,\n            wait_time=0,\n            out_file=out_file,\n            pred_score_thr=args.score_thr)\n\n    if not args.show:\n        print_log(\n            f'\\nResults have been saved at {os.path.abspath(args.out_dir)}')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "projects/Detic/detic/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .centernet_rpn_head import CenterNetRPNHead\nfrom .detic_bbox_head import DeticBBoxHead\nfrom .detic_roi_head import DeticRoIHead\nfrom .zero_shot_classifier import ZeroShotClassifier\n\n__all__ = [\n    'CenterNetRPNHead', 'DeticBBoxHead', 'DeticRoIHead', 'ZeroShotClassifier'\n]\n"
  },
  {
    "path": "projects/Detic/detic/centernet_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nfrom typing import List, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Scale\nfrom mmengine import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.dense_heads import CenterNetUpdateHead\nfrom mmdet.models.utils import multi_apply\nfrom mmdet.registry import MODELS\n\nINF = 1000000000\nRangeType = Sequence[Tuple[int, int]]\n\n\n@MODELS.register_module(force=True)  # avoid bug\nclass CenterNetRPNHead(CenterNetUpdateHead):\n    \"\"\"CenterNetUpdateHead is an improved version of CenterNet in CenterNet2.\n\n    Paper link `<https://arxiv.org/abs/2103.07461>`_.\n    \"\"\"\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self._init_reg_convs()\n        self._init_predictor()\n\n    def _init_predictor(self) -> None:\n        \"\"\"Initialize predictor layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.num_classes, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of each level outputs.\n\n            - cls_scores (list[Tensor]): Box scores for each scale level, \\\n            each is a 4D-tensor, the channel number is num_classes.\n            - bbox_preds (list[Tensor]): Box energies / deltas for each \\\n            scale level, each is a 4D-tensor, the channel number is 4.\n        \"\"\"\n        res = multi_apply(self.forward_single, x, self.scales, self.strides)\n        return res\n\n    def forward_single(self, x: Tensor, scale: Scale,\n                       stride: int) -> Tuple[Tensor, Tensor]:\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps.\n\n        Returns:\n            tuple: scores for each class, bbox predictions of\n            input feature maps.\n        \"\"\"\n        for m in self.reg_convs:\n            x = m(x)\n        cls_score = self.conv_cls(x)\n        bbox_pred = self.conv_reg(x)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        # bbox_pred needed for gradient computation has been modified\n        # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace\n        # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n        bbox_pred = bbox_pred.clamp(min=0)\n        if not self.training:\n            bbox_pred *= stride\n        return cls_score, bbox_pred  # score aligned, box larger\n\n    def _predict_by_feat_single(self,\n                                cls_score_list: List[Tensor],\n                                bbox_pred_list: List[Tensor],\n                                score_factor_list: List[Tensor],\n                                mlvl_priors: List[Tensor],\n                                img_meta: dict,\n                                cfg: ConfigDict,\n                                rescale: bool = False,\n                                with_nms: bool = True) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmengine.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            with_nms (bool): If True, do nms before return boxes.\n                Defaults to True.\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bbox_preds = []\n        mlvl_valid_priors = []\n        mlvl_scores = []\n        mlvl_labels = []\n\n        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, mlvl_priors)):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            dim = self.bbox_coder.encode_size\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            heatmap = 
cls_score.sigmoid()\n            score_thr = cfg.get('score_thr', 0)\n\n            candidate_inds = heatmap > score_thr  # 0.05\n            pre_nms_top_n = candidate_inds.sum()  # N\n            pre_nms_top_n = pre_nms_top_n.clamp(max=nms_pre)  # N\n\n            heatmap = heatmap[candidate_inds]  # n\n\n            candidate_nonzeros = candidate_inds.nonzero()  # n\n            box_loc = candidate_nonzeros[:, 0]  # n\n            labels = candidate_nonzeros[:, 1]  # n\n\n            bbox_pred = bbox_pred[box_loc]  # n x 4\n            per_grids = priors[box_loc]  # n x 2\n\n            if candidate_inds.sum().item() > pre_nms_top_n.item():\n                heatmap, top_k_indices = \\\n                    heatmap.topk(pre_nms_top_n, sorted=False)\n                labels = labels[top_k_indices]\n                bbox_pred = bbox_pred[top_k_indices]\n                per_grids = per_grids[top_k_indices]\n\n            bboxes = self.bbox_coder.decode(per_grids, bbox_pred)\n            # avoid invalid boxes in RoI heads\n            bboxes[:, 2] = torch.max(bboxes[:, 2], bboxes[:, 0] + 0.01)\n            bboxes[:, 3] = torch.max(bboxes[:, 3], bboxes[:, 1] + 0.01)\n\n            mlvl_bbox_preds.append(bboxes)\n            mlvl_valid_priors.append(priors)\n            mlvl_scores.append(torch.sqrt(heatmap))\n            mlvl_labels.append(labels)\n\n        results = InstanceData()\n        results.bboxes = torch.cat(mlvl_bbox_preds)\n        results.scores = torch.cat(mlvl_scores)\n        results.labels = torch.cat(mlvl_labels)\n\n        return self._bbox_post_process(\n            results=results,\n            cfg=cfg,\n            rescale=rescale,\n            with_nms=with_nms,\n            img_meta=img_meta)\n"
  },
  {
    "path": "projects/Detic/detic/detic_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Union\n\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.layers import multiclass_nms\nfrom mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead\nfrom mmdet.models.utils import empty_instances\nfrom mmdet.registry import MODELS\nfrom mmdet.structures.bbox import get_box_tensor, scale_boxes\n\n\n@MODELS.register_module(force=True)  # avoid bug\nclass DeticBBoxHead(Shared2FCBBoxHead):\n\n    def __init__(self,\n                 *args,\n                 init_cfg: Optional[Union[dict, ConfigDict]] = None,\n                 **kwargs) -> None:\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        # reconstruct fc_cls and fc_reg since input channels are changed\n        assert self.with_cls\n        cls_channels = self.num_classes\n        cls_predictor_cfg_ = self.cls_predictor_cfg.copy()\n        cls_predictor_cfg_.update(\n            in_features=self.cls_last_dim, out_features=cls_channels)\n        self.fc_cls = MODELS.build(cls_predictor_cfg_)\n\n    def _predict_by_feat_single(\n            self,\n            roi: Tensor,\n            cls_score: Tensor,\n            bbox_pred: Tensor,\n            img_meta: dict,\n            rescale: bool = False,\n            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n        \"\"\"Transform a single image's features extracted from the head into\n        bbox results.\n\n        Args:\n            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).\n                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n            cls_score (Tensor): Box scores, has shape\n                (num_boxes, num_classes + 1).\n            bbox_pred (Tensor): Box energies / deltas.\n                has shape (num_boxes, num_classes * 4).\n            img_meta (dict): image information.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n                Defaults to None\n\n        Returns:\n            :obj:`InstanceData`: Detection results of each image\\\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        results = InstanceData()\n        if roi.shape[0] == 0:\n            return empty_instances([img_meta],\n                                   roi.device,\n                                   task_type='bbox',\n                                   instance_results=[results],\n                                   box_type=self.predict_box_type,\n                                   use_box_type=False,\n                                   num_classes=self.num_classes,\n                                   score_per_cls=rcnn_test_cfg is None)[0]\n        scores = cls_score\n        img_shape = img_meta['img_shape']\n        num_rois = roi.size(0)\n\n        num_classes = 1 if self.reg_class_agnostic else self.num_classes\n        roi = roi.repeat_interleave(num_classes, dim=0)\n        bbox_pred = bbox_pred.view(-1, 
self.bbox_coder.encode_size)\n        bboxes = self.bbox_coder.decode(\n            roi[..., 1:], bbox_pred, max_shape=img_shape)\n\n        if rescale and bboxes.size(0) > 0:\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            bboxes = scale_boxes(bboxes, scale_factor)\n\n        # Get the inside tensor when `bboxes` is a box type\n        bboxes = get_box_tensor(bboxes)\n        box_dim = bboxes.size(-1)\n        bboxes = bboxes.view(num_rois, -1)\n\n        if rcnn_test_cfg is None:\n            # This means that it is aug test.\n            # It needs to return the raw results without nms.\n            results.bboxes = bboxes\n            results.scores = scores\n        else:\n            det_bboxes, det_labels = multiclass_nms(\n                bboxes,\n                scores,\n                rcnn_test_cfg.score_thr,\n                rcnn_test_cfg.nms,\n                rcnn_test_cfg.max_per_img,\n                box_dim=box_dim)\n            results.bboxes = det_bboxes[:, :-1]\n            results.scores = det_bboxes[:, -1]\n            results.labels = det_labels\n        return results\n"
  },
  {
    "path": "projects/Detic/detic/detic_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Sequence, Tuple\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads import CascadeRoIHead\nfrom mmdet.models.task_modules.samplers import SamplingResult\nfrom mmdet.models.test_time_augs import merge_aug_masks\nfrom mmdet.models.utils.misc import empty_instances\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import bbox2roi, get_box_tensor\nfrom mmdet.utils import ConfigType, InstanceList, MultiConfig\n\n\n@MODELS.register_module(force=True)  # avoid bug\nclass DeticRoIHead(CascadeRoIHead):\n\n    def init_mask_head(self, mask_roi_extractor: MultiConfig,\n                       mask_head: MultiConfig) -> None:\n        \"\"\"Initialize mask head and mask roi extractor.\n\n        Args:\n            mask_head (dict): Config of mask in mask head.\n            mask_roi_extractor (:obj:`ConfigDict`, dict or list):\n                Config of mask roi extractor.\n        \"\"\"\n        self.mask_head = MODELS.build(mask_head)\n\n        if mask_roi_extractor is not None:\n            self.share_roi_extractor = False\n            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)\n        else:\n            self.share_roi_extractor = True\n            self.mask_roi_extractor = self.bbox_roi_extractor\n\n    def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,\n                    batch_img_metas: List[dict],\n                    num_proposals_per_img: Sequence[int], **kwargs) -> tuple:\n        \"\"\"Multi-stage refinement of RoI.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]\n            batch_img_metas (list[dict]): List of image information.\n            num_proposals_per_img (sequence[int]): number of proposals\n                in each image.\n\n        Returns:\n            tuple:\n\n               - rois (Tensor): Refined RoI.\n               - cls_scores (list[Tensor]): Average predicted\n                   cls score per image.\n               - bbox_preds (list[Tensor]): Bbox branch predictions\n                   for the last stage of per image.\n        \"\"\"\n        # \"ms\" in variable names means multi-stage\n        ms_scores = []\n        for stage in range(self.num_stages):\n            bbox_results = self._bbox_forward(\n                stage=stage, x=x, rois=rois, **kwargs)\n\n            # split batch bbox prediction back to each image\n            cls_scores = bbox_results['cls_score'].sigmoid()\n            bbox_preds = bbox_results['bbox_pred']\n\n            rois = rois.split(num_proposals_per_img, 0)\n            cls_scores = cls_scores.split(num_proposals_per_img, 0)\n            ms_scores.append(cls_scores)\n            bbox_preds = bbox_preds.split(num_proposals_per_img, 0)\n\n            if stage < self.num_stages - 1:\n                bbox_head = self.bbox_head[stage]\n                refine_rois_list = []\n                for i in range(len(batch_img_metas)):\n                    if rois[i].shape[0] > 0:\n                        bbox_label = cls_scores[i][:, :-1].argmax(dim=1)\n                        # Refactor `bbox_head.regress_by_class` to only accept\n                        # box tensor without img_idx concatenated.\n                        refined_bboxes = bbox_head.regress_by_class(\n                            rois[i][:, 1:], 
bbox_label, bbox_preds[i],\n                            batch_img_metas[i])\n                        refined_bboxes = get_box_tensor(refined_bboxes)\n                        refined_rois = torch.cat(\n                            [rois[i][:, [0]], refined_bboxes], dim=1)\n                        refine_rois_list.append(refined_rois)\n                rois = torch.cat(refine_rois_list)\n        # ms_scores aligned\n        # average scores of each image by stages\n        cls_scores = [\n            sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n            for i in range(len(batch_img_metas))\n        ]  # aligned\n        return rois, cls_scores, bbox_preds\n\n    def _bbox_forward(self, stage: int, x: Tuple[Tensor],\n                      rois: Tensor) -> dict:\n        \"\"\"Box head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): List of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n             dict[str, Tensor]: Usually returns a dictionary with keys:\n\n                - `cls_score` (Tensor): Classification scores.\n                - `bbox_pred` (Tensor): Box energies / deltas.\n                - `bbox_feats` (Tensor): Extract bbox RoI features.\n        \"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        cls_score, bbox_pred = bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def predict_bbox(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     rpn_results_list: InstanceList,\n                     rcnn_test_cfg: ConfigType,\n                     rescale: bool = False,\n                     **kwargs) -> InstanceList:\n        \"\"\"Perform forward propagation of the bbox head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        proposals = [res.bboxes for res in rpn_results_list]\n        proposal_scores = [res.scores for res in rpn_results_list]\n        num_proposals_per_img = tuple(len(p) for p in 
proposals)\n        rois = bbox2roi(proposals)\n\n        if rois.shape[0] == 0:\n            return empty_instances(\n                batch_img_metas,\n                rois.device,\n                task_type='bbox',\n                box_type=self.bbox_head[-1].predict_box_type,\n                num_classes=self.bbox_head[-1].num_classes,\n                score_per_cls=rcnn_test_cfg is None)\n        # rois aligned\n        rois, cls_scores, bbox_preds = self._refine_roi(\n            x=x,\n            rois=rois,\n            batch_img_metas=batch_img_metas,\n            num_proposals_per_img=num_proposals_per_img,\n            **kwargs)\n\n        # score reweighting in centernet2\n        cls_scores = [(s * ps[:, None])**0.5\n                      for s, ps in zip(cls_scores, proposal_scores)]\n        cls_scores = [\n            s * (s == s[:, :-1].max(dim=1)[0][:, None]).float()\n            for s in cls_scores\n        ]\n\n        # fast_rcnn_inference\n        results_list = self.bbox_head[-1].predict_by_feat(\n            rois=rois,\n            cls_scores=cls_scores,\n            bbox_preds=bbox_preds,\n            batch_img_metas=batch_img_metas,\n            rescale=rescale,\n            rcnn_test_cfg=rcnn_test_cfg)\n        return results_list\n\n    def _mask_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:\n        \"\"\"Mask head forward function used in both training and testing.\n\n        Args:\n            stage (int): The current stage in Cascade RoI Head.\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            rois (Tensor): RoIs with the shape (n, 5) where the first\n                column indicates batch id of each RoI.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n        \"\"\"\n        mask_feats = self.mask_roi_extractor(\n            x[:self.mask_roi_extractor.num_inputs], rois)\n        # do not support caffe_c4 model anymore\n        mask_preds = self.mask_head(mask_feats)\n\n        mask_results = dict(mask_preds=mask_preds)\n        return mask_results\n\n    def mask_loss(self, x, sampling_results: List[SamplingResult],\n                  batch_gt_instances: InstanceList) -> dict:\n        \"\"\"Run forward function and calculate loss for mask head in training.\n\n        Args:\n            x (tuple[Tensor]): Tuple of multi-level img features.\n            sampling_results (list[\"obj:`SamplingResult`]): Sampling results.\n            batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n                gt_instance. 
It usually includes ``bboxes``, ``labels``, and\n                ``masks`` attributes.\n\n        Returns:\n            dict: Usually returns a dictionary with keys:\n\n                - `mask_preds` (Tensor): Mask prediction.\n                - `loss_mask` (dict): A dictionary of mask loss components.\n        \"\"\"\n        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])\n        mask_results = self._mask_forward(x, pos_rois)\n\n        mask_loss_and_target = self.mask_head.loss_and_target(\n            mask_preds=mask_results['mask_preds'],\n            sampling_results=sampling_results,\n            batch_gt_instances=batch_gt_instances,\n            rcnn_train_cfg=self.train_cfg[-1])\n        mask_results.update(mask_loss_and_target)\n\n        return mask_results\n\n    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,\n             batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        roi on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): List of multi-level img features.\n            rpn_results_list (list[:obj:`InstanceData`]): List of region\n                proposals.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components\n        \"\"\"\n        raise NotImplementedError\n\n    def predict_mask(self,\n                     x: Tuple[Tensor],\n                     batch_img_metas: List[dict],\n                     results_list: List[InstanceData],\n                     rescale: bool = False) -> List[InstanceData]:\n        \"\"\"Perform forward propagation of the mask head and predict detection\n        results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            batch_img_metas (list[dict]): List of image information.\n            results_list (list[:obj:`InstanceData`]): Detection results of\n                each image.\n            rescale (bool): If True, return boxes in original image space.\n                Defaults to False.\n\n        Returns:\n            list[:obj:`InstanceData`]: Detection results of each image\n            after the post process.\n            Each item usually contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                  (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                  the last dimension 4 arrange as (x1, y1, x2, y2).\n                - masks (Tensor): Has a shape (num_instances, H, W).\n        \"\"\"\n        bboxes = [res.bboxes for res in results_list]\n        mask_rois = bbox2roi(bboxes)\n        if mask_rois.shape[0] == 0:\n            results_list = empty_instances(\n                batch_img_metas,\n                mask_rois.device,\n                task_type='mask',\n                instance_results=results_list,\n                mask_thr_binary=self.test_cfg.mask_thr_binary)\n            return results_list\n\n        num_mask_rois_per_img = [len(res) for res in results_list]\n        aug_masks = []\n        mask_results = self._mask_forward(x, mask_rois)\n        
mask_preds = mask_results['mask_preds']\n        # split batch mask prediction back to each image\n        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)\n        aug_masks.append([m.sigmoid().detach() for m in mask_preds])\n\n        merged_masks = []\n        for i in range(len(batch_img_metas)):\n            aug_mask = [mask[i] for mask in aug_masks]\n            merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])\n            merged_masks.append(merged_mask)\n        results_list = self.mask_head.predict_by_feat(\n            mask_preds=merged_masks,\n            results_list=results_list,\n            batch_img_metas=batch_img_metas,\n            rcnn_test_cfg=self.test_cfg,\n            rescale=rescale,\n            activate_map=True)\n        return results_list\n"
  },
  {
    "path": "projects/Detic/detic/text_encoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\n\nclass CLIPTextEncoder(nn.Module):\n\n    def __init__(self, model_name='ViT-B/32'):\n        super().__init__()\n        import clip\n        from clip.simple_tokenizer import SimpleTokenizer\n        self.tokenizer = SimpleTokenizer()\n        pretrained_model, _ = clip.load(model_name, device='cpu')\n        self.clip = pretrained_model\n\n    @property\n    def device(self):\n        return self.clip.device\n\n    @property\n    def dtype(self):\n        return self.clip.dtype\n\n    def tokenize(self,\n                 texts: Union[str, List[str]],\n                 context_length: int = 77) -> torch.LongTensor:\n        if isinstance(texts, str):\n            texts = [texts]\n\n        sot_token = self.tokenizer.encoder['<|startoftext|>']\n        eot_token = self.tokenizer.encoder['<|endoftext|>']\n        all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]\n                      for text in texts]\n        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n        for i, tokens in enumerate(all_tokens):\n            if len(tokens) > context_length:\n                st = torch.randint(len(tokens) - context_length + 1,\n                                   (1, ))[0].item()\n                tokens = tokens[st:st + context_length]\n            result[i, :len(tokens)] = torch.tensor(tokens)\n\n        return result\n\n    def forward(self, text):\n        text = self.tokenize(text)\n        text_features = self.clip.encode_text(text)\n        return text_features\n"
  },
  {
    "path": "projects/Detic/detic/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.logging import print_log\n\nfrom .text_encoder import CLIPTextEncoder\n\n# download from\n# https://github.com/facebookresearch/Detic/tree/main/datasets/metadata\nDATASET_EMBEDDINGS = {\n    'lvis': 'datasets/metadata/lvis_v1_clip_a+cname.npy',\n    'objects365': 'datasets/metadata/o365_clip_a+cnamefix.npy',\n    'openimages': 'datasets/metadata/oid_clip_a+cname.npy',\n    'coco': 'datasets/metadata/coco_clip_a+cname.npy',\n}\n\n\ndef get_text_embeddings(dataset=None,\n                        custom_vocabulary=None,\n                        prompt_prefix='a '):\n    assert (dataset is None) ^ (custom_vocabulary is None), \\\n        'Either `dataset` or `custom_vocabulary` should be specified.'\n    if dataset:\n        if dataset in DATASET_EMBEDDINGS:\n            return DATASET_EMBEDDINGS[dataset]\n        else:\n            custom_vocabulary = get_class_names(dataset)\n\n    text_encoder = CLIPTextEncoder()\n    text_encoder.eval()\n    texts = [prompt_prefix + x for x in custom_vocabulary]\n    print_log(\n        f'Computing text embeddings for {len(custom_vocabulary)} classes.')\n    embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()\n    return embeddings\n\n\ndef get_class_names(dataset):\n    if dataset == 'coco':\n        from mmdet.datasets import CocoDataset\n        class_names = CocoDataset.METAINFO['classes']\n    elif dataset == 'cityscapes':\n        from mmdet.datasets import CityscapesDataset\n        class_names = CityscapesDataset.METAINFO['classes']\n    elif dataset == 'voc':\n        from mmdet.datasets import VOCDataset\n        class_names = VOCDataset.METAINFO['classes']\n    elif dataset == 'openimages':\n        from mmdet.datasets import OpenImagesDataset\n        class_names = OpenImagesDataset.METAINFO['classes']\n    elif dataset == 'lvis':\n        from mmdet.datasets import LVISV1Dataset\n        class_names = LVISV1Dataset.METAINFO['classes']\n    else:\n        raise TypeError(f'Invalid type for dataset name: {type(dataset)}')\n    return class_names\n\n\ndef reset_cls_layer_weight(model, weight):\n    if type(weight) == str:\n        print_log(f'Resetting cls_layer_weight from file: {weight}')\n        zs_weight = torch.tensor(\n            np.load(weight),\n            dtype=torch.float32).permute(1, 0).contiguous()  # D x C\n    else:\n        zs_weight = weight\n    zs_weight = torch.cat(\n        [zs_weight, zs_weight.new_zeros(\n            (zs_weight.shape[0], 1))], dim=1)  # D x (C + 1)\n    zs_weight = F.normalize(zs_weight, p=2, dim=0)\n    zs_weight = zs_weight.to('cuda')\n    num_classes = zs_weight.shape[-1]\n\n    for bbox_head in model.roi_head.bbox_head:\n        bbox_head.num_classes = num_classes\n        del bbox_head.fc_cls.zs_weight\n        bbox_head.fc_cls.zs_weight = zs_weight\n"
  },
  {
    "path": "projects/Detic/detic/zero_shot_classifier.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates.\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module(force=True)  # avoid bug\nclass ZeroShotClassifier(nn.Module):\n\n    def __init__(\n        self,\n        in_features: int,\n        out_features: int,  # num_classes\n        zs_weight_path: str,\n        zs_weight_dim: int = 512,\n        use_bias: float = 0.0,\n        norm_weight: bool = True,\n        norm_temperature: float = 50.0,\n    ):\n        super().__init__()\n        num_classes = out_features\n        self.norm_weight = norm_weight\n        self.norm_temperature = norm_temperature\n\n        self.use_bias = use_bias < 0\n        if self.use_bias:\n            self.cls_bias = nn.Parameter(torch.ones(1) * use_bias)\n\n        self.linear = nn.Linear(in_features, zs_weight_dim)\n\n        if zs_weight_path == 'rand':\n            zs_weight = torch.randn((zs_weight_dim, num_classes))\n            nn.init.normal_(zs_weight, std=0.01)\n        else:\n            zs_weight = torch.tensor(\n                np.load(zs_weight_path),\n                dtype=torch.float32).permute(1, 0).contiguous()  # D x C\n        zs_weight = torch.cat(\n            [zs_weight, zs_weight.new_zeros(\n                (zs_weight_dim, 1))], dim=1)  # D x (C + 1)\n\n        if self.norm_weight:\n            zs_weight = F.normalize(zs_weight, p=2, dim=0)\n\n        if zs_weight_path == 'rand':\n            self.zs_weight = nn.Parameter(zs_weight)\n        else:\n            self.register_buffer('zs_weight', zs_weight)\n\n        assert self.zs_weight.shape[1] == num_classes + 1, self.zs_weight.shape\n\n    def forward(self, x, classifier=None):\n        '''\n        Inputs:\n            x: B x D'\n            classifier_info: (C', C' x D)\n        '''\n        x = self.linear(x)\n        if classifier is not None:\n            zs_weight = classifier.permute(1, 0).contiguous()  # D x C'\n            zs_weight = F.normalize(zs_weight, p=2, dim=0) \\\n                if self.norm_weight else zs_weight\n        else:\n            zs_weight = self.zs_weight\n        if self.norm_weight:\n            x = self.norm_temperature * F.normalize(x, p=2, dim=1)\n        x = torch.mm(x, zs_weight)\n        if self.use_bias:\n            x = x + self.cls_bias\n        return x\n"
  },
  {
    "path": "projects/DiffusionDet/README.md",
    "content": "## Description\n\nThis is an implementation of [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet) based on [MMDetection](https://github.com/open-mmlab/mmdetection/tree/3.x), [MMCV](https://github.com/open-mmlab/mmcv), and [MMEngine](https://github.com/open-mmlab/mmengine).\n\n<center>\n<img src=\"https://user-images.githubusercontent.com/48282753/211472911-c84d658a-952b-4608-8b91-9ac932cbf2e2.png\">\n</center>\n\n## Usage\n\n<!-- For a typical model, this section should contain the commands for training and testing. You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. -->\n\n### Comparison of results\n\n1. Download the [DiffusionDet released model](https://github.com/ShoufaChen/DiffusionDet#models).\n\n2. Convert model from DiffusionDet version to MMDetection version. We give a [sample script](model_converters/diffusiondet_resnet_to_mmdet.py)\n   to convert `DiffusionDet-resnet50` model. Users can download the corresponding models from [here](https://github.com/ShoufaChen/DiffusionDet/releases/download/v0.1/diffdet_coco_res50.pth).\n\n   ```shell\n   python projects/DiffusionDet/model_converters/diffusiondet_resnet_to_mmdet.py ${DiffusionDet ckpt path} ${MMDetectron ckpt path}\n   ```\n\n3. Testing the model in MMDetection.\n\n   ```shell\n   python tools/test.py projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py ${CHECKPOINT_PATH}\n   ```\n\n**Note:** During inference time, DiffusionDet will randomly generate noisy boxes,\nwhich may affect the AP results. If users want to get the same result every inference time, setting seed is a good way.\nWe give a table to compare the inference results on `ResNet50-500-proposals` between DiffusionDet and MMDetection.\n\n|                                                         Config                                                          | Step |    AP     |\n| :---------------------------------------------------------------------------------------------------------------------: | :--: | :-------: |\n| [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet/blob/main/configs/diffdet.coco.res50.yaml) (released results) |  1   |   45.5    |\n|      [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet/blob/main/configs/diffdet.coco.res50.yaml) (seed=0)      |  1   |   45.66   |\n|         [MMDetection](configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py) (seed=0)          |  1   |   45.7    |\n|       [MMDetection](configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py) (random seed)       |  1   | 45.6~45.8 |\n| [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet/blob/main/configs/diffdet.coco.res50.yaml) (released results) |  4   |   46.1    |\n|      [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet/blob/main/configs/diffdet.coco.res50.yaml) (seed=0)      |  4   |   46.38   |\n|         [MMDetection](configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py) (seed=0)          |  4   |   46.4    |\n|       [MMDetection](configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py) (random seed)       |  4   | 46.2~46.4 |\n\n- `seed=0` means hard set seed before generating random boxes.\n  ```python\n  # hard set seed=0 before generating random boxes\n  seed = 0\n  random.seed(seed)\n  torch.manual_seed(seed)\n  # torch.cuda.manual_seed(seed)\n  torch.cuda.manual_seed_all(seed)\n  ...\n  noise_bboxes_raw = 
torch.randn(\n      (self.num_proposals, 4),\n      device=device)\n  ...\n  ```\n- `random seed` means do not hard set seed before generating random boxes.\n\n### Training commands\n\nIn MMDetection's root directory, run the following command to train the model:\n\n```bash\npython tools/train.py projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py\n```\n\nFor multi-gpu training, run:\n\n```bash\npython -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=${NUM_GPUS} --master_port=29506 --master_addr=\"127.0.0.1\" tools/train.py projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py\n```\n\n### Testing commands\n\nIn MMDetection's root directory, run the following command to test the model:\n\n```bash\n# for 1 step inference\n# test command\npython tools/test.py projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py ${CHECKPOINT_PATH}\n\n# for 4 steps inference\n\n# test command\npython tools/test.py projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py ${CHECKPOINT_PATH} --cfg-options model.bbox_head.sampling_timesteps=4\n```\n\n**Note:** There is no difference between 1 step or 4 steps (or other multi-step) during training. Users can set different steps during inference through `--cfg-options model.bbox_head.sampling_timesteps=${STEPS}`, but larger `sampling_timesteps` will affect the inference time.\n\n## Results\n\nHere we provide the baseline version of DiffusionDet with ResNet50 backbone.\n\nTo find more variants, please visit the [official model zoo](https://github.com/ShoufaChen/DiffusionDet#models).\n\n| Backbone |  Style  | Lr schd | AP (Step=1) | AP (Step=4) |                                           Config                                           |                                                                                                                                                                                                                                      Download                                                                                                                                                                                                                                      |\n| :------: | :-----: | :-----: | :---------: | :---------: | :----------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|   R-50   | PyTorch |  450k   |    44.5     |    46.2     | [config](./configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/diffusiondet/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco_20230215_090925-7d6ed504.pth) \\| 
[log](https://download.openmmlab.com/mmdetection/v3.0/diffusiondet/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco_20230215_090925.log.json) |\n\n## License\n\nDiffusionDet is under the [CC-BY-NC 4.0 license](https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE). Users should be careful about adopting these features in any commercial matters.\n\n## Citation\n\nIf you find DiffusionDet is useful in your research or applications, please consider giving a star 🌟 to the [official repository](https://github.com/ShoufaChen/DiffusionDet) and citing DiffusionDet by the following BibTeX entry.\n\n```BibTeX\n@article{chen2022diffusiondet,\n      title={DiffusionDet: Diffusion Model for Object Detection},\n      author={Chen, Shoufa and Sun, Peize and Song, Yibing and Luo, Ping},\n      journal={arXiv preprint arXiv:2211.09788},\n      year={2022}\n}\n```\n\n## Checklist\n\n<!-- Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.\nOpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone.\nNote that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.\nA project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->\n\n- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.\n\n  - [x] Finish the code\n\n    <!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmdet.registry.MODELS` and configurable via a config file. -->\n\n  - [x] Basic docstrings & proper citation\n\n    <!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->\n\n  - [x] Test-time correctness\n\n    <!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->\n\n  - [x] A full README\n\n    <!-- As this template does. 
-->\n\n- [x] Milestone 2: Indicates a successful model implementation.\n\n  - [x] Training-time correctness\n\n    <!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->\n\n- [ ] Milestone 3: Good to be a part of our core package!\n\n  - [ ] Type hints and docstrings\n\n    <!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/mmdet/datasets/transforms/transforms.py#L41-L169) -->\n\n  - [ ] Unit tests\n\n    <!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/tests/test_datasets/test_transforms/test_transforms.py#L35-L88) -->\n\n  - [ ] Code polishing\n\n    <!-- Refactor your code according to reviewer's comment. -->\n\n  - [ ] Metafile.yml\n\n    <!-- It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/metafile.yml) -->\n\n- [ ] Move your modules into the core package following the codebase's file hierarchy structure.\n\n  <!-- In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md) -->\n\n- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.\n"
  },
  {
    "path": "projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py",
    "content": "_base_ = [\n    'mmdet::_base_/datasets/coco_detection.py',\n    'mmdet::_base_/schedules/schedule_1x.py',\n    'mmdet::_base_/default_runtime.py'\n]\n\ncustom_imports = dict(\n    imports=['projects.DiffusionDet.diffusiondet'], allow_failed_imports=False)\n\n# model settings\nmodel = dict(\n    type='DiffusionDet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        num_outs=4),\n    bbox_head=dict(\n        type='DynamicDiffusionDetHead',\n        num_classes=80,\n        feat_channels=256,\n        num_proposals=500,\n        num_heads=6,\n        deep_supervision=True,\n        prior_prob=0.01,\n        snr_scale=2.0,\n        sampling_timesteps=1,\n        ddim_sampling_eta=1.0,\n        single_head=dict(\n            type='SingleDiffusionDetHead',\n            num_cls_convs=1,\n            num_reg_convs=3,\n            dim_feedforward=2048,\n            num_heads=8,\n            dropout=0.0,\n            act_cfg=dict(type='ReLU', inplace=True),\n            dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),\n        roi_extractor=dict(\n            type='SingleRoIExtractor',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=256,\n            featmap_strides=[4, 8, 16, 32]),\n        # criterion\n        criterion=dict(\n            type='DiffusionDetCriterion',\n            num_classes=80,\n            assigner=dict(\n                type='DiffusionDetMatcher',\n                match_costs=[\n                    dict(\n                        type='FocalLossCost',\n                        alpha=0.25,\n                        gamma=2.0,\n                        weight=2.0,\n                        eps=1e-8),\n                    dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'),\n                    dict(type='IoUCost', iou_mode='giou', weight=2.0)\n                ],\n                center_radius=2.5,\n                candidate_topk=5),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                alpha=0.25,\n                gamma=2.0,\n                reduction='sum',\n                loss_weight=2.0),\n            loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),\n            loss_giou=dict(type='GIoULoss', reduction='sum',\n                           loss_weight=2.0))),\n    test_cfg=dict(\n        use_nms=True,\n        score_thr=0.5,\n        min_bbox_size=0,\n        nms=dict(type='nms', iou_threshold=0.5),\n    ))\n\nbackend = 'pillow'\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args=_base_.file_client_args,\n        imdecode_backend=backend),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='RandomFlip', prob=0.5),\n    dict(\n        type='RandomChoice',\n        transforms=[[\n            dict(\n                type='RandomChoiceResize',\n                scales=[(480, 1333), (512, 
1333), (544, 1333), (576, 1333),\n                        (608, 1333), (640, 1333), (672, 1333), (704, 1333),\n                        (736, 1333), (768, 1333), (800, 1333)],\n                keep_ratio=True,\n                backend=backend),\n        ],\n                    [\n                        dict(\n                            type='RandomChoiceResize',\n                            scales=[(400, 1333), (500, 1333), (600, 1333)],\n                            keep_ratio=True,\n                            backend=backend),\n                        dict(\n                            type='RandomCrop',\n                            crop_type='absolute_range',\n                            crop_size=(384, 600),\n                            allow_negative_crop=True),\n                        dict(\n                            type='RandomChoiceResize',\n                            scales=[(480, 1333), (512, 1333), (544, 1333),\n                                    (576, 1333), (608, 1333), (640, 1333),\n                                    (672, 1333), (704, 1333), (736, 1333),\n                                    (768, 1333), (800, 1333)],\n                            keep_ratio=True,\n                            backend=backend)\n                    ]]),\n    dict(type='PackDetInputs')\n]\n\ntest_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args=_base_.file_client_args,\n        imdecode_backend=backend),\n    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),\n    # If you don't have a gt annotation, delete the pipeline\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\ntrain_dataloader = dict(\n    sampler=dict(type='InfiniteSampler'),\n    dataset=dict(\n        filter_cfg=dict(filter_empty_gt=False, min_size=1e-5),\n        pipeline=train_pipeline))\n\nval_dataloader = dict(dataset=dict(pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\n# optimizer\noptim_wrapper = dict(\n    type='OptimWrapper',\n    optimizer=dict(\n        _delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001),\n    clip_grad=dict(max_norm=1.0, norm_type=2))\ntrain_cfg = dict(\n    _delete_=True,\n    type='IterBasedTrainLoop',\n    max_iters=450000,\n    val_interval=75000)\n\n# learning rate\nparam_scheduler = [\n    dict(\n        type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=1000),\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=450000,\n        by_epoch=False,\n        milestones=[350000, 420000],\n        gamma=0.1)\n]\n\ndefault_hooks = dict(\n    checkpoint=dict(by_epoch=False, interval=75000, max_keep_ckpts=3))\nlog_processor = dict(by_epoch=False)\n"
  },
  {
    "path": "projects/DiffusionDet/diffusiondet/__init__.py",
    "content": "from .diffusiondet import DiffusionDet\nfrom .head import (DynamicConv, DynamicDiffusionDetHead,\n                   SingleDiffusionDetHead, SinusoidalPositionEmbeddings)\nfrom .loss import DiffusionDetCriterion, DiffusionDetMatcher\n\n__all__ = [\n    'DiffusionDet', 'DynamicDiffusionDetHead', 'SingleDiffusionDetHead',\n    'SinusoidalPositionEmbeddings', 'DynamicConv', 'DiffusionDetCriterion',\n    'DiffusionDetMatcher'\n]\n"
  },
  {
    "path": "projects/DiffusionDet/diffusiondet/diffusiondet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models import SingleStageDetector\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass DiffusionDet(SingleStageDetector):\n    \"\"\"Implementation of `DiffusionDet <>`_\"\"\"\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "projects/DiffusionDet/diffusiondet/head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/detector.py   # noqa\n# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/head.py   # noqa\n\n# This work is licensed under the CC-BY-NC 4.0 License.\n# Users should be careful about adopting these features in any commercial matters.    # noqa\n# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE    # noqa\n\nimport copy\nimport math\nimport random\nimport warnings\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import build_activation_layer\nfrom mmcv.ops import batched_nms\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures import SampleList\nfrom mmdet.structures.bbox import (bbox2roi, bbox_cxcywh_to_xyxy,\n                                   bbox_xyxy_to_cxcywh, get_box_wh,\n                                   scale_boxes)\nfrom mmdet.utils import InstanceList\n\n_DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16)\n\n\ndef cosine_beta_schedule(timesteps, s=0.008):\n    \"\"\"Cosine schedule as proposed in\n    https://openreview.net/forum?id=-NEXDKk8gZ.\"\"\"\n    steps = timesteps + 1\n    x = torch.linspace(0, timesteps, steps, dtype=torch.float64)\n    alphas_cumprod = torch.cos(\n        ((x / timesteps) + s) / (1 + s) * math.pi * 0.5)**2\n    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]\n    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])\n    return torch.clip(betas, 0, 0.999)\n\n\ndef extract(a, t, x_shape):\n    \"\"\"extract the appropriate t index for a batch of indices.\"\"\"\n    batch_size = t.shape[0]\n    out = a.gather(-1, t)\n    return out.reshape(batch_size, *((1, ) * (len(x_shape) - 1)))\n\n\nclass SinusoidalPositionEmbeddings(nn.Module):\n\n    def __init__(self, dim):\n        super().__init__()\n        self.dim = dim\n\n    def forward(self, time):\n        device = time.device\n        half_dim = self.dim // 2\n        embeddings = math.log(10000) / (half_dim - 1)\n        embeddings = torch.exp(\n            torch.arange(half_dim, device=device) * -embeddings)\n        embeddings = time[:, None] * embeddings[None, :]\n        embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)\n        return embeddings\n\n\n@MODELS.register_module()\nclass DynamicDiffusionDetHead(nn.Module):\n\n    def __init__(self,\n                 num_classes=80,\n                 feat_channels=256,\n                 num_proposals=500,\n                 num_heads=6,\n                 prior_prob=0.01,\n                 snr_scale=2.0,\n                 timesteps=1000,\n                 sampling_timesteps=1,\n                 self_condition=False,\n                 box_renewal=True,\n                 use_ensemble=True,\n                 deep_supervision=True,\n                 ddim_sampling_eta=1.0,\n                 criterion=dict(\n                     type='DiffusionDetCriterion',\n                     num_classes=80,\n                     assigner=dict(\n                         type='DiffusionDetMatcher',\n                         match_costs=[\n                             dict(\n                                 type='FocalLossCost',\n                                 alpha=2.0,\n                                 
gamma=0.25,\n                                 weight=2.0),\n                             dict(\n                                 type='BBoxL1Cost',\n                                 weight=5.0,\n                                 box_format='xyxy'),\n                             dict(type='IoUCost', iou_mode='giou', weight=2.0)\n                         ],\n                         center_radius=2.5,\n                         candidate_topk=5),\n                 ),\n                 single_head=dict(\n                     type='DiffusionDetHead',\n                     num_cls_convs=1,\n                     num_reg_convs=3,\n                     dim_feedforward=2048,\n                     num_heads=8,\n                     dropout=0.0,\n                     act_cfg=dict(type='ReLU'),\n                     dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),\n                 roi_extractor=dict(\n                     type='SingleRoIExtractor',\n                     roi_layer=dict(\n                         type='RoIAlign', output_size=7, sampling_ratio=2),\n                     out_channels=256,\n                     featmap_strides=[4, 8, 16, 32]),\n                 test_cfg=None,\n                 **kwargs) -> None:\n        super().__init__()\n        self.roi_extractor = MODELS.build(roi_extractor)\n\n        self.num_classes = num_classes\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.num_proposals = num_proposals\n        self.num_heads = num_heads\n        # Build Diffusion\n        assert isinstance(timesteps, int), 'The type of `timesteps` should ' \\\n                                           f'be int but got {type(timesteps)}'\n        assert sampling_timesteps <= timesteps\n        self.timesteps = timesteps\n        self.sampling_timesteps = sampling_timesteps\n        self.snr_scale = snr_scale\n\n        self.ddim_sampling = self.sampling_timesteps < self.timesteps\n        self.ddim_sampling_eta = ddim_sampling_eta\n        self.self_condition = self_condition\n        self.box_renewal = box_renewal\n        self.use_ensemble = use_ensemble\n\n        self._build_diffusion()\n\n        # Build assigner\n        assert criterion.get('assigner', None) is not None\n        assigner = TASK_UTILS.build(criterion.get('assigner'))\n        # Init parameters.\n        self.use_focal_loss = assigner.use_focal_loss\n        self.use_fed_loss = assigner.use_fed_loss\n\n        # build criterion\n        criterion.update(deep_supervision=deep_supervision)\n        self.criterion = TASK_UTILS.build(criterion)\n\n        # Build Dynamic Head.\n        single_head_ = single_head.copy()\n        single_head_num_classes = single_head_.get('num_classes', None)\n        if single_head_num_classes is None:\n            single_head_.update(num_classes=num_classes)\n        else:\n            if single_head_num_classes != num_classes:\n                warnings.warn(\n                    'The `num_classes` of `DynamicDiffusionDetHead` and '\n                    '`SingleDiffusionDetHead` should be same, changing '\n                    f'`single_head.num_classes` to {num_classes}')\n                single_head_.update(num_classes=num_classes)\n\n        single_head_feat_channels = single_head_.get('feat_channels', None)\n        if single_head_feat_channels is None:\n            single_head_.update(feat_channels=feat_channels)\n        else:\n            if single_head_feat_channels != feat_channels:\n                warnings.warn(\n                    
'The `feat_channels` of `DynamicDiffusionDetHead` and '\n                    '`SingleDiffusionDetHead` should be same, changing '\n                    f'`single_head.feat_channels` to {feat_channels}')\n                single_head_.update(feat_channels=feat_channels)\n\n        default_pooler_resolution = roi_extractor['roi_layer'].get(\n            'output_size')\n        assert default_pooler_resolution is not None\n        single_head_pooler_resolution = single_head_.get('pooler_resolution')\n        if single_head_pooler_resolution is None:\n            single_head_.update(pooler_resolution=default_pooler_resolution)\n        else:\n            if single_head_pooler_resolution != default_pooler_resolution:\n                warnings.warn(\n                    'The `pooler_resolution` of `DynamicDiffusionDetHead` '\n                    'and `SingleDiffusionDetHead` should be same, changing '\n                    f'`single_head.pooler_resolution` to {num_classes}')\n                single_head_.update(\n                    pooler_resolution=default_pooler_resolution)\n\n        single_head_.update(\n            use_focal_loss=self.use_focal_loss, use_fed_loss=self.use_fed_loss)\n        single_head_module = MODELS.build(single_head_)\n\n        self.num_heads = num_heads\n        self.head_series = nn.ModuleList(\n            [copy.deepcopy(single_head_module) for _ in range(num_heads)])\n\n        self.deep_supervision = deep_supervision\n\n        # Gaussian random feature embedding layer for time\n        time_dim = feat_channels * 4\n        self.time_mlp = nn.Sequential(\n            SinusoidalPositionEmbeddings(feat_channels),\n            nn.Linear(feat_channels, time_dim), nn.GELU(),\n            nn.Linear(time_dim, time_dim))\n\n        self.prior_prob = prior_prob\n        self.test_cfg = test_cfg\n        self.use_nms = self.test_cfg.get('use_nms', True)\n        self._init_weights()\n\n    def _init_weights(self):\n        # init all parameters.\n        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n            # initialize the bias for focal loss and fed loss.\n            if self.use_focal_loss or self.use_fed_loss:\n                if p.shape[-1] == self.num_classes or \\\n                        p.shape[-1] == self.num_classes + 1:\n                    nn.init.constant_(p, bias_value)\n\n    def _build_diffusion(self):\n        betas = cosine_beta_schedule(self.timesteps)\n        alphas = 1. - betas\n        alphas_cumprod = torch.cumprod(alphas, dim=0)\n        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)\n\n        self.register_buffer('betas', betas)\n        self.register_buffer('alphas_cumprod', alphas_cumprod)\n        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)\n\n        # calculations for diffusion q(x_t | x_{t-1}) and others\n        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))\n        self.register_buffer('sqrt_one_minus_alphas_cumprod',\n                             torch.sqrt(1. - alphas_cumprod))\n        self.register_buffer('log_one_minus_alphas_cumprod',\n                             torch.log(1. - alphas_cumprod))\n        self.register_buffer('sqrt_recip_alphas_cumprod',\n                             torch.sqrt(1. / alphas_cumprod))\n        self.register_buffer('sqrt_recipm1_alphas_cumprod',\n                             torch.sqrt(1. 
/ alphas_cumprod - 1))\n\n        # calculations for posterior q(x_{t-1} | x_t, x_0)\n        # equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)\n        posterior_variance = betas * (1. - alphas_cumprod_prev) / (\n            1. - alphas_cumprod)\n        self.register_buffer('posterior_variance', posterior_variance)\n\n        # log calculation clipped because the posterior variance is 0 at\n        # the beginning of the diffusion chain\n        self.register_buffer('posterior_log_variance_clipped',\n                             torch.log(posterior_variance.clamp(min=1e-20)))\n        self.register_buffer(\n            'posterior_mean_coef1',\n            betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))\n        self.register_buffer('posterior_mean_coef2',\n                             (1. - alphas_cumprod_prev) * torch.sqrt(alphas) /\n                             (1. - alphas_cumprod))\n\n    def forward(self, features, init_bboxes, init_t, init_features=None):\n        time = self.time_mlp(init_t, )\n\n        inter_class_logits = []\n        inter_pred_bboxes = []\n\n        bs = len(features[0])\n        bboxes = init_bboxes\n\n        if init_features is not None:\n            init_features = init_features[None].repeat(1, bs, 1)\n            proposal_features = init_features.clone()\n        else:\n            proposal_features = None\n\n        for head_idx, single_head in enumerate(self.head_series):\n            class_logits, pred_bboxes, proposal_features = single_head(\n                features, bboxes, proposal_features, self.roi_extractor, time)\n            if self.deep_supervision:\n                inter_class_logits.append(class_logits)\n                inter_pred_bboxes.append(pred_bboxes)\n            bboxes = pred_bboxes.detach()\n\n        if self.deep_supervision:\n            return torch.stack(inter_class_logits), torch.stack(\n                inter_pred_bboxes)\n        else:\n            return class_logits[None, ...], pred_bboxes[None, ...]\n\n    def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:\n        \"\"\"Perform forward propagation and loss calculation of the detection\n        head on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        prepare_outputs = self.prepare_training_targets(batch_data_samples)\n        (batch_gt_instances, batch_pred_instances, batch_gt_instances_ignore,\n         batch_img_metas) = prepare_outputs\n\n        batch_diff_bboxes = torch.stack([\n            pred_instances.diff_bboxes_abs\n            for pred_instances in batch_pred_instances\n        ])\n        batch_time = torch.stack(\n            [pred_instances.time for pred_instances in batch_pred_instances])\n\n        pred_logits, pred_bboxes = self(x, batch_diff_bboxes, batch_time)\n\n        output = {\n            'pred_logits': pred_logits[-1],\n            'pred_boxes': pred_bboxes[-1]\n        }\n        if self.deep_supervision:\n            output['aux_outputs'] = [{\n                'pred_logits': a,\n                'pred_boxes': b\n            } for a, b in zip(pred_logits[:-1], pred_bboxes[:-1])]\n\n        losses = self.criterion(output, batch_gt_instances, batch_img_metas)\n        return losses\n\n    def prepare_training_targets(self, batch_data_samples):\n        # hard-setting seed to keep results same (if necessary)\n        # random.seed(0)\n        # torch.manual_seed(0)\n        # torch.cuda.manual_seed_all(0)\n        # torch.backends.cudnn.deterministic = True\n        # torch.backends.cudnn.benchmark = False\n\n        batch_gt_instances = []\n        batch_pred_instances = []\n        batch_gt_instances_ignore = []\n        batch_img_metas = []\n        for data_sample in batch_data_samples:\n            img_meta = data_sample.metainfo\n            gt_instances = data_sample.gt_instances\n\n            gt_bboxes = gt_instances.bboxes\n            h, w = img_meta['img_shape']\n            image_size = gt_bboxes.new_tensor([w, h, w, h])\n\n            norm_gt_bboxes = gt_bboxes / image_size\n            norm_gt_bboxes_cxcywh = bbox_xyxy_to_cxcywh(norm_gt_bboxes)\n            pred_instances = self.prepare_diffusion(norm_gt_bboxes_cxcywh,\n                                                    image_size)\n\n            gt_instances.set_metainfo(dict(image_size=image_size))\n            gt_instances.norm_bboxes_cxcywh = norm_gt_bboxes_cxcywh\n\n            batch_gt_instances.append(gt_instances)\n            batch_pred_instances.append(pred_instances)\n            batch_img_metas.append(data_sample.metainfo)\n            if 'ignored_instances' in data_sample:\n                batch_gt_instances_ignore.append(data_sample.ignored_instances)\n            else:\n                batch_gt_instances_ignore.append(None)\n        return (batch_gt_instances, batch_pred_instances,\n                batch_gt_instances_ignore, batch_img_metas)\n\n    def prepare_diffusion(self, gt_boxes, image_size):\n        device = gt_boxes.device\n        time = torch.randint(\n            0, self.timesteps, (1, ), dtype=torch.long, device=device)\n        noise = torch.randn(self.num_proposals, 4, device=device)\n\n        num_gt = gt_boxes.shape[0]\n        if num_gt < self.num_proposals:\n            # 3 * sigma = 1/2 --> sigma: 1/6\n            box_placeholder = torch.randn(\n                self.num_proposals - num_gt, 4, device=device) / 6. 
+ 0.5\n            box_placeholder[:, 2:] = torch.clip(\n                box_placeholder[:, 2:], min=1e-4)\n            x_start = torch.cat((gt_boxes, box_placeholder), dim=0)\n        else:\n            select_mask = [True] * self.num_proposals + \\\n                          [False] * (num_gt - self.num_proposals)\n            random.shuffle(select_mask)\n            x_start = gt_boxes[select_mask]\n\n        x_start = (x_start * 2. - 1.) * self.snr_scale\n\n        # noise sample\n        x = self.q_sample(x_start=x_start, time=time, noise=noise)\n\n        x = torch.clamp(x, min=-1 * self.snr_scale, max=self.snr_scale)\n        x = ((x / self.snr_scale) + 1) / 2.\n\n        diff_bboxes = bbox_cxcywh_to_xyxy(x)\n        # convert to abs bboxes\n        diff_bboxes_abs = diff_bboxes * image_size\n\n        metainfo = dict(time=time.squeeze(-1))\n        pred_instances = InstanceData(metainfo=metainfo)\n        pred_instances.diff_bboxes = diff_bboxes\n        pred_instances.diff_bboxes_abs = diff_bboxes_abs\n        pred_instances.noise = noise\n        return pred_instances\n\n    # forward diffusion\n    def q_sample(self, x_start, time, noise=None):\n        if noise is None:\n            noise = torch.randn_like(x_start)\n\n        x_start_shape = x_start.shape\n\n        sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, time,\n                                        x_start_shape)\n        sqrt_one_minus_alphas_cumprod_t = extract(\n            self.sqrt_one_minus_alphas_cumprod, time, x_start_shape)\n\n        return sqrt_alphas_cumprod_t * x_start + \\\n            sqrt_one_minus_alphas_cumprod_t * noise\n\n    def predict(self,\n                x: Tuple[Tensor],\n                batch_data_samples: SampleList,\n                rescale: bool = False) -> InstanceList:\n        \"\"\"Perform forward propagation of the detection head and predict\n        detection results on the features of the upstream network.\n\n        Args:\n            x (tuple[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[obj:`InstanceData`]: Detection results of each image\n            after the post process.\n        \"\"\"\n        # hard-setting seed to keep results same (if necessary)\n        # seed = 0\n        # random.seed(seed)\n        # torch.manual_seed(seed)\n        # torch.cuda.manual_seed_all(seed)\n\n        device = x[-1].device\n\n        batch_img_metas = [\n            data_samples.metainfo for data_samples in batch_data_samples\n        ]\n\n        (time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,\n         batch_image_size) = self.prepare_testing_targets(\n             batch_img_metas, device)\n\n        predictions = self.predict_by_feat(\n            x,\n            time_pairs=time_pairs,\n            batch_noise_bboxes=batch_noise_bboxes,\n            batch_noise_bboxes_raw=batch_noise_bboxes_raw,\n            batch_image_size=batch_image_size,\n            device=device,\n            batch_img_metas=batch_img_metas)\n        return predictions\n\n    def predict_by_feat(self,\n                        x,\n                        time_pairs,\n                        batch_noise_bboxes,\n                        batch_noise_bboxes_raw,\n                        batch_image_size,\n                        device,\n                        batch_img_metas=None,\n                        cfg=None,\n                        rescale=True):\n\n        batch_size = len(batch_img_metas)\n\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n\n        ensemble_score, ensemble_label, ensemble_coord = [], [], []\n        for time, time_next in time_pairs:\n            batch_time = torch.full((batch_size, ),\n                                    time,\n                                    device=device,\n                                    dtype=torch.long)\n            # self_condition = x_start if self.self_condition else None\n            pred_logits, pred_bboxes = self(x, batch_noise_bboxes, batch_time)\n\n            x_start = pred_bboxes[-1]\n\n            x_start = x_start / batch_image_size[:, None, :]\n            x_start = bbox_xyxy_to_cxcywh(x_start)\n            x_start = (x_start * 2 - 1.) 
* self.snr_scale\n            x_start = torch.clamp(\n                x_start, min=-1 * self.snr_scale, max=self.snr_scale)\n            pred_noise = self.predict_noise_from_start(batch_noise_bboxes_raw,\n                                                       batch_time, x_start)\n            pred_noise_list, x_start_list = [], []\n            noise_bboxes_list, num_remain_list = [], []\n            if self.box_renewal:  # filter\n                score_thr = cfg.get('score_thr', 0)\n                for img_id in range(batch_size):\n                    score_per_image = pred_logits[-1][img_id]\n\n                    score_per_image = torch.sigmoid(score_per_image)\n                    value, _ = torch.max(score_per_image, -1, keepdim=False)\n                    keep_idx = value > score_thr\n\n                    num_remain_list.append(torch.sum(keep_idx))\n                    pred_noise_list.append(pred_noise[img_id, keep_idx, :])\n                    x_start_list.append(x_start[img_id, keep_idx, :])\n                    noise_bboxes_list.append(batch_noise_bboxes[img_id,\n                                                                keep_idx, :])\n            if time_next < 0:\n                # Not same as original DiffusionDet\n                if self.use_ensemble and self.sampling_timesteps > 1:\n                    box_pred_per_image, scores_per_image, labels_per_image = \\\n                        self.inference(\n                            box_cls=pred_logits[-1],\n                            box_pred=pred_bboxes[-1],\n                            cfg=cfg,\n                            device=device)\n                    ensemble_score.append(scores_per_image)\n                    ensemble_label.append(labels_per_image)\n                    ensemble_coord.append(box_pred_per_image)\n                continue\n\n            alpha = self.alphas_cumprod[time]\n            alpha_next = self.alphas_cumprod[time_next]\n\n            sigma = self.ddim_sampling_eta * ((1 - alpha / alpha_next) *\n                                              (1 - alpha_next) /\n                                              (1 - alpha)).sqrt()\n            c = (1 - alpha_next - sigma**2).sqrt()\n\n            batch_noise_bboxes_list = []\n            batch_noise_bboxes_raw_list = []\n            for idx in range(batch_size):\n                pred_noise = pred_noise_list[idx]\n                x_start = x_start_list[idx]\n                noise_bboxes = noise_bboxes_list[idx]\n                num_remain = num_remain_list[idx]\n                noise = torch.randn_like(noise_bboxes)\n\n                noise_bboxes = x_start * alpha_next.sqrt() + \\\n                    c * pred_noise + sigma * noise\n\n                if self.box_renewal:  # filter\n                    # replenish with randn boxes\n                    if num_remain < self.num_proposals:\n                        noise_bboxes = torch.cat(\n                            (noise_bboxes,\n                             torch.randn(\n                                 self.num_proposals - num_remain,\n                                 4,\n                                 device=device)),\n                            dim=0)\n                    else:\n                        select_mask = [True] * self.num_proposals + \\\n                                      [False] * (num_remain -\n                                                 self.num_proposals)\n                        random.shuffle(select_mask)\n                        noise_bboxes = 
noise_bboxes[select_mask]\n\n                    # raw noise boxes\n                    batch_noise_bboxes_raw_list.append(noise_bboxes)\n                    # resize to xyxy\n                    noise_bboxes = torch.clamp(\n                        noise_bboxes,\n                        min=-1 * self.snr_scale,\n                        max=self.snr_scale)\n                    noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2\n                    noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)\n                    noise_bboxes = noise_bboxes * batch_image_size[idx]\n\n                batch_noise_bboxes_list.append(noise_bboxes)\n            batch_noise_bboxes = torch.stack(batch_noise_bboxes_list)\n            batch_noise_bboxes_raw = torch.stack(batch_noise_bboxes_raw_list)\n            if self.use_ensemble and self.sampling_timesteps > 1:\n                box_pred_per_image, scores_per_image, labels_per_image = \\\n                    self.inference(\n                        box_cls=pred_logits[-1],\n                        box_pred=pred_bboxes[-1],\n                        cfg=cfg,\n                        device=device)\n                ensemble_score.append(scores_per_image)\n                ensemble_label.append(labels_per_image)\n                ensemble_coord.append(box_pred_per_image)\n        if self.use_ensemble and self.sampling_timesteps > 1:\n            steps = len(ensemble_score)\n            results_list = []\n            for idx in range(batch_size):\n                ensemble_score_per_img = [\n                    ensemble_score[i][idx] for i in range(steps)\n                ]\n                ensemble_label_per_img = [\n                    ensemble_label[i][idx] for i in range(steps)\n                ]\n                ensemble_coord_per_img = [\n                    ensemble_coord[i][idx] for i in range(steps)\n                ]\n\n                scores_per_image = torch.cat(ensemble_score_per_img, dim=0)\n                labels_per_image = torch.cat(ensemble_label_per_img, dim=0)\n                box_pred_per_image = torch.cat(ensemble_coord_per_img, dim=0)\n\n                if self.use_nms:\n                    det_bboxes, keep_idxs = batched_nms(\n                        box_pred_per_image, scores_per_image, labels_per_image,\n                        cfg.nms)\n                    box_pred_per_image = box_pred_per_image[keep_idxs]\n                    labels_per_image = labels_per_image[keep_idxs]\n                    scores_per_image = det_bboxes[:, -1]\n                results = InstanceData()\n                results.bboxes = box_pred_per_image\n                results.scores = scores_per_image\n                results.labels = labels_per_image\n            results_list.append(results)\n        else:\n            box_cls = pred_logits[-1]\n            box_pred = pred_bboxes[-1]\n            results_list = self.inference(box_cls, box_pred, cfg, device)\n        if rescale:\n            results_list = self.do_results_post_process(\n                results_list, cfg, batch_img_metas=batch_img_metas)\n        return results_list\n\n    @staticmethod\n    def do_results_post_process(results_list, cfg, batch_img_metas=None):\n        processed_results = []\n        for results, img_meta in zip(results_list, batch_img_metas):\n            assert img_meta.get('scale_factor') is not None\n            scale_factor = [1 / s for s in img_meta['scale_factor']]\n            results.bboxes = scale_boxes(results.bboxes, scale_factor)\n            # clip w, h\n            
h, w = img_meta['ori_shape']\n            results.bboxes[:, 0::2] = results.bboxes[:, 0::2].clamp(\n                min=0, max=w)\n            results.bboxes[:, 1::2] = results.bboxes[:, 1::2].clamp(\n                min=0, max=h)\n\n            # filter small size bboxes\n            if cfg.get('min_bbox_size', 0) >= 0:\n                w, h = get_box_wh(results.bboxes)\n                valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n                if not valid_mask.all():\n                    results = results[valid_mask]\n            processed_results.append(results)\n\n        return processed_results\n\n    def prepare_testing_targets(self, batch_img_metas, device):\n        # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == timesteps\n        times = torch.linspace(\n            -1, self.timesteps - 1, steps=self.sampling_timesteps + 1)\n        times = list(reversed(times.int().tolist()))\n        # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]\n        time_pairs = list(zip(times[:-1], times[1:]))\n\n        noise_bboxes_list = []\n        noise_bboxes_raw_list = []\n        image_size_list = []\n        for img_meta in batch_img_metas:\n            h, w = img_meta['img_shape']\n            image_size = torch.tensor([w, h, w, h],\n                                      dtype=torch.float32,\n                                      device=device)\n            noise_bboxes_raw = torch.randn((self.num_proposals, 4),\n                                           device=device)\n            noise_bboxes = torch.clamp(\n                noise_bboxes_raw, min=-1 * self.snr_scale, max=self.snr_scale)\n            noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2\n            noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)\n            noise_bboxes = noise_bboxes * image_size\n\n            noise_bboxes_raw_list.append(noise_bboxes_raw)\n            noise_bboxes_list.append(noise_bboxes)\n            image_size_list.append(image_size[None])\n        batch_noise_bboxes = torch.stack(noise_bboxes_list)\n        batch_image_size = torch.cat(image_size_list)\n        batch_noise_bboxes_raw = torch.stack(noise_bboxes_raw_list)\n        return (time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,\n                batch_image_size)\n\n    def predict_noise_from_start(self, x_t, t, x0):\n        results = (extract(\n            self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \\\n                  extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n        return results\n\n    def inference(self, box_cls, box_pred, cfg, device):\n        \"\"\"\n        Args:\n            box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).\n                The tensor predicts the classification probability for\n                each proposal.\n            box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).\n                The tensor predicts 4-vector (x,y,w,h) box\n                regression values for every proposal\n\n        Returns:\n            results (List[Instances]): a list of #images elements.\n        \"\"\"\n        results = []\n\n        if self.use_focal_loss or self.use_fed_loss:\n            scores = torch.sigmoid(box_cls)\n            labels = torch.arange(\n                self.num_classes,\n                device=device).unsqueeze(0).repeat(self.num_proposals,\n                                                   1).flatten(0, 1)\n            box_pred_list = []\n            scores_list = []\n            labels_list = []\n       
     for i, (scores_per_image,\n                    box_pred_per_image) in enumerate(zip(scores, box_pred)):\n\n                scores_per_image, topk_indices = scores_per_image.flatten(\n                    0, 1).topk(\n                        self.num_proposals, sorted=False)\n                labels_per_image = labels[topk_indices]\n                box_pred_per_image = box_pred_per_image.view(-1, 1, 4).repeat(\n                    1, self.num_classes, 1).view(-1, 4)\n                box_pred_per_image = box_pred_per_image[topk_indices]\n\n                if self.use_ensemble and self.sampling_timesteps > 1:\n                    box_pred_list.append(box_pred_per_image)\n                    scores_list.append(scores_per_image)\n                    labels_list.append(labels_per_image)\n                    continue\n\n                if self.use_nms:\n                    det_bboxes, keep_idxs = batched_nms(\n                        box_pred_per_image, scores_per_image, labels_per_image,\n                        cfg.nms)\n                    box_pred_per_image = box_pred_per_image[keep_idxs]\n                    labels_per_image = labels_per_image[keep_idxs]\n                    # some nms would reweight the score, such as softnms\n                    scores_per_image = det_bboxes[:, -1]\n                result = InstanceData()\n                result.bboxes = box_pred_per_image\n                result.scores = scores_per_image\n                result.labels = labels_per_image\n                results.append(result)\n\n        else:\n            # For each box we assign the best class or the second\n            # best if the best on is `no_object`.\n            scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)\n\n            for i, (scores_per_image, labels_per_image,\n                    box_pred_per_image) in enumerate(\n                        zip(scores, labels, box_pred)):\n                if self.use_ensemble and self.sampling_timesteps > 1:\n                    return box_pred_per_image, scores_per_image, \\\n                           labels_per_image\n\n                if self.use_nms:\n                    det_bboxes, keep_idxs = batched_nms(\n                        box_pred_per_image, scores_per_image, labels_per_image,\n                        cfg.nms)\n                    box_pred_per_image = box_pred_per_image[keep_idxs]\n                    labels_per_image = labels_per_image[keep_idxs]\n                    # some nms would reweight the score, such as softnms\n                    scores_per_image = det_bboxes[:, -1]\n\n                result = InstanceData()\n                result.bboxes = box_pred_per_image\n                result.scores = scores_per_image\n                result.labels = labels_per_image\n                results.append(result)\n        if self.use_ensemble and self.sampling_timesteps > 1:\n            return box_pred_list, scores_list, labels_list\n        else:\n            return results\n\n\n@MODELS.register_module()\nclass SingleDiffusionDetHead(nn.Module):\n\n    def __init__(\n        self,\n        num_classes=80,\n        feat_channels=256,\n        dim_feedforward=2048,\n        num_cls_convs=1,\n        num_reg_convs=3,\n        num_heads=8,\n        dropout=0.0,\n        pooler_resolution=7,\n        scale_clamp=_DEFAULT_SCALE_CLAMP,\n        bbox_weights=(2.0, 2.0, 1.0, 1.0),\n        use_focal_loss=True,\n        use_fed_loss=False,\n        act_cfg=dict(type='ReLU', inplace=True),\n        dynamic_conv=dict(dynamic_dim=64, 
dynamic_num=2)\n    ) -> None:\n        super().__init__()\n        self.feat_channels = feat_channels\n\n        # Dynamic\n        self.self_attn = nn.MultiheadAttention(\n            feat_channels, num_heads, dropout=dropout)\n        self.inst_interact = DynamicConv(\n            feat_channels=feat_channels,\n            pooler_resolution=pooler_resolution,\n            dynamic_dim=dynamic_conv['dynamic_dim'],\n            dynamic_num=dynamic_conv['dynamic_num'])\n\n        self.linear1 = nn.Linear(feat_channels, dim_feedforward)\n        self.dropout = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(dim_feedforward, feat_channels)\n\n        self.norm1 = nn.LayerNorm(feat_channels)\n        self.norm2 = nn.LayerNorm(feat_channels)\n        self.norm3 = nn.LayerNorm(feat_channels)\n        self.dropout1 = nn.Dropout(dropout)\n        self.dropout2 = nn.Dropout(dropout)\n        self.dropout3 = nn.Dropout(dropout)\n\n        self.activation = build_activation_layer(act_cfg)\n\n        # block time mlp\n        self.block_time_mlp = nn.Sequential(\n            nn.SiLU(), nn.Linear(feat_channels * 4, feat_channels * 2))\n\n        # cls.\n        cls_module = list()\n        for _ in range(num_cls_convs):\n            cls_module.append(nn.Linear(feat_channels, feat_channels, False))\n            cls_module.append(nn.LayerNorm(feat_channels))\n            cls_module.append(nn.ReLU(inplace=True))\n        self.cls_module = nn.ModuleList(cls_module)\n\n        # reg.\n        reg_module = list()\n        for _ in range(num_reg_convs):\n            reg_module.append(nn.Linear(feat_channels, feat_channels, False))\n            reg_module.append(nn.LayerNorm(feat_channels))\n            reg_module.append(nn.ReLU(inplace=True))\n        self.reg_module = nn.ModuleList(reg_module)\n\n        # pred.\n        self.use_focal_loss = use_focal_loss\n        self.use_fed_loss = use_fed_loss\n        if self.use_focal_loss or self.use_fed_loss:\n            self.class_logits = nn.Linear(feat_channels, num_classes)\n        else:\n            self.class_logits = nn.Linear(feat_channels, num_classes + 1)\n        self.bboxes_delta = nn.Linear(feat_channels, 4)\n        self.scale_clamp = scale_clamp\n        self.bbox_weights = bbox_weights\n\n    def forward(self, features, bboxes, pro_features, pooler, time_emb):\n        \"\"\"\n        :param bboxes: (N, num_boxes, 4)\n        :param pro_features: (N, num_boxes, feat_channels)\n        \"\"\"\n\n        N, num_boxes = bboxes.shape[:2]\n\n        # roi_feature.\n        proposal_boxes = list()\n        for b in range(N):\n            proposal_boxes.append(bboxes[b])\n        rois = bbox2roi(proposal_boxes)\n\n        roi_features = pooler(features, rois)\n\n        if pro_features is None:\n            pro_features = roi_features.view(N, num_boxes, self.feat_channels,\n                                             -1).mean(-1)\n\n        roi_features = roi_features.view(N * num_boxes, self.feat_channels,\n                                         -1).permute(2, 0, 1)\n\n        # self_att.\n        pro_features = pro_features.view(N, num_boxes,\n                                         self.feat_channels).permute(1, 0, 2)\n        pro_features2 = self.self_attn(\n            pro_features, pro_features, value=pro_features)[0]\n        pro_features = pro_features + self.dropout1(pro_features2)\n        pro_features = self.norm1(pro_features)\n\n        # inst_interact.\n        pro_features = pro_features.view(\n            num_boxes, N,\n           
 self.feat_channels).permute(1, 0,\n                                        2).reshape(1, N * num_boxes,\n                                                   self.feat_channels)\n        pro_features2 = self.inst_interact(pro_features, roi_features)\n        pro_features = pro_features + self.dropout2(pro_features2)\n        obj_features = self.norm2(pro_features)\n\n        # obj_feature.\n        obj_features2 = self.linear2(\n            self.dropout(self.activation(self.linear1(obj_features))))\n        obj_features = obj_features + self.dropout3(obj_features2)\n        obj_features = self.norm3(obj_features)\n\n        fc_feature = obj_features.transpose(0, 1).reshape(N * num_boxes, -1)\n\n        scale_shift = self.block_time_mlp(time_emb)\n        scale_shift = torch.repeat_interleave(scale_shift, num_boxes, dim=0)\n        scale, shift = scale_shift.chunk(2, dim=1)\n        fc_feature = fc_feature * (scale + 1) + shift\n\n        cls_feature = fc_feature.clone()\n        reg_feature = fc_feature.clone()\n        for cls_layer in self.cls_module:\n            cls_feature = cls_layer(cls_feature)\n        for reg_layer in self.reg_module:\n            reg_feature = reg_layer(reg_feature)\n        class_logits = self.class_logits(cls_feature)\n        bboxes_deltas = self.bboxes_delta(reg_feature)\n        pred_bboxes = self.apply_deltas(bboxes_deltas, bboxes.view(-1, 4))\n\n        return (class_logits.view(N, num_boxes,\n                                  -1), pred_bboxes.view(N, num_boxes,\n                                                        -1), obj_features)\n\n    def apply_deltas(self, deltas, boxes):\n        \"\"\"Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\n\n        Args:\n            deltas (Tensor): transformation deltas of shape (N, k*4),\n                where k >= 1. 
deltas[i] represents k potentially\n                different class-specific box transformations for\n                the single box boxes[i].\n            boxes (Tensor): boxes to transform, of shape (N, 4)\n        \"\"\"\n        boxes = boxes.to(deltas.dtype)\n\n        widths = boxes[:, 2] - boxes[:, 0]\n        heights = boxes[:, 3] - boxes[:, 1]\n        ctr_x = boxes[:, 0] + 0.5 * widths\n        ctr_y = boxes[:, 1] + 0.5 * heights\n\n        wx, wy, ww, wh = self.bbox_weights\n        dx = deltas[:, 0::4] / wx\n        dy = deltas[:, 1::4] / wy\n        dw = deltas[:, 2::4] / ww\n        dh = deltas[:, 3::4] / wh\n\n        # Prevent sending too large values into torch.exp()\n        dw = torch.clamp(dw, max=self.scale_clamp)\n        dh = torch.clamp(dh, max=self.scale_clamp)\n\n        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\n        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\n        pred_w = torch.exp(dw) * widths[:, None]\n        pred_h = torch.exp(dh) * heights[:, None]\n\n        pred_boxes = torch.zeros_like(deltas)\n        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1\n        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1\n        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2\n        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2\n\n        return pred_boxes\n\n\nclass DynamicConv(nn.Module):\n\n    def __init__(self,\n                 feat_channels: int,\n                 dynamic_dim: int = 64,\n                 dynamic_num: int = 2,\n                 pooler_resolution: int = 7) -> None:\n        super().__init__()\n\n        self.feat_channels = feat_channels\n        self.dynamic_dim = dynamic_dim\n        self.dynamic_num = dynamic_num\n        self.num_params = self.feat_channels * self.dynamic_dim\n        self.dynamic_layer = nn.Linear(self.feat_channels,\n                                       self.dynamic_num * self.num_params)\n\n        self.norm1 = nn.LayerNorm(self.dynamic_dim)\n        self.norm2 = nn.LayerNorm(self.feat_channels)\n\n        self.activation = nn.ReLU(inplace=True)\n\n        num_output = self.feat_channels * pooler_resolution**2\n        self.out_layer = nn.Linear(num_output, self.feat_channels)\n        self.norm3 = nn.LayerNorm(self.feat_channels)\n\n    def forward(self, pro_features: Tensor, roi_features: Tensor) -> Tensor:\n        \"\"\"Forward function.\n\n        Args:\n            pro_features: (1,  N * num_boxes, self.feat_channels)\n            roi_features: (49, N * num_boxes, self.feat_channels)\n\n        Returns:\n        \"\"\"\n        features = roi_features.permute(1, 0, 2)\n        parameters = self.dynamic_layer(pro_features).permute(1, 0, 2)\n\n        param1 = parameters[:, :, :self.num_params].view(\n            -1, self.feat_channels, self.dynamic_dim)\n        param2 = parameters[:, :,\n                            self.num_params:].view(-1, self.dynamic_dim,\n                                                   self.feat_channels)\n\n        features = torch.bmm(features, param1)\n        features = self.norm1(features)\n        features = self.activation(features)\n\n        features = torch.bmm(features, param2)\n        features = self.norm2(features)\n        features = self.activation(features)\n\n        features = features.flatten(1)\n        features = self.out_layer(features)\n        features = self.norm3(features)\n        features = self.activation(features)\n\n        return features\n"
  },
  {
    "path": "projects/DiffusionDet/diffusiondet/loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/loss.py   # noqa\n\n# This work is licensed under the CC-BY-NC 4.0 License.\n# Users should be careful about adopting these features in any commercial matters.  # noqa\n# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE    # noqa\n\nfrom typing import List, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh\nfrom mmdet.utils import ConfigType\n\n\n@TASK_UTILS.register_module()\nclass DiffusionDetCriterion(nn.Module):\n\n    def __init__(\n            self,\n            num_classes,\n            assigner: Union[ConfigDict, nn.Module],\n            deep_supervision=True,\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                alpha=0.25,\n                gamma=2.0,\n                reduction='sum',\n                loss_weight=2.0),\n            loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),\n            loss_giou=dict(type='GIoULoss', reduction='sum', loss_weight=2.0),\n    ):\n\n        super().__init__()\n        self.num_classes = num_classes\n\n        if isinstance(assigner, nn.Module):\n            self.assigner = assigner\n        else:\n            self.assigner = TASK_UTILS.build(assigner)\n\n        self.deep_supervision = deep_supervision\n\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_bbox = MODELS.build(loss_bbox)\n        self.loss_giou = MODELS.build(loss_giou)\n\n    def forward(self, outputs, batch_gt_instances, batch_img_metas):\n        batch_indices = self.assigner(outputs, batch_gt_instances,\n                                      batch_img_metas)\n        # Compute all the requested losses\n        loss_cls = self.loss_classification(outputs, batch_gt_instances,\n                                            batch_indices)\n        loss_bbox, loss_giou = self.loss_boxes(outputs, batch_gt_instances,\n                                               batch_indices)\n\n        losses = dict(\n            loss_cls=loss_cls, loss_bbox=loss_bbox, loss_giou=loss_giou)\n\n        if self.deep_supervision:\n            assert 'aux_outputs' in outputs\n            for i, aux_outputs in enumerate(outputs['aux_outputs']):\n                batch_indices = self.assigner(aux_outputs, batch_gt_instances,\n                                              batch_img_metas)\n                loss_cls = self.loss_classification(aux_outputs,\n                                                    batch_gt_instances,\n                                                    batch_indices)\n                loss_bbox, loss_giou = self.loss_boxes(aux_outputs,\n                                                       batch_gt_instances,\n                                                       batch_indices)\n                tmp_losses = dict(\n                    loss_cls=loss_cls,\n                    loss_bbox=loss_bbox,\n                    loss_giou=loss_giou)\n                for name, value in tmp_losses.items():\n                    losses[f's.{i}.{name}'] = value\n        return losses\n\n    def 
loss_classification(self, outputs, batch_gt_instances, indices):\n        assert 'pred_logits' in outputs\n        src_logits = outputs['pred_logits']\n        target_classes_list = [\n            gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)\n        ]\n        target_classes = torch.full(\n            src_logits.shape[:2],\n            self.num_classes,\n            dtype=torch.int64,\n            device=src_logits.device)\n        for idx in range(len(batch_gt_instances)):\n            target_classes[idx, indices[idx][0]] = target_classes_list[idx]\n\n        src_logits = src_logits.flatten(0, 1)\n        target_classes = target_classes.flatten(0, 1)\n        # comp focal loss.\n        num_instances = max(torch.cat(target_classes_list).shape[0], 1)\n        loss_cls = self.loss_cls(\n            src_logits,\n            target_classes,\n        ) / num_instances\n        return loss_cls\n\n    def loss_boxes(self, outputs, batch_gt_instances, indices):\n        assert 'pred_boxes' in outputs\n        pred_boxes = outputs['pred_boxes']\n\n        target_bboxes_norm_list = [\n            gt.norm_bboxes_cxcywh[J]\n            for gt, (_, J) in zip(batch_gt_instances, indices)\n        ]\n        target_bboxes_list = [\n            gt.bboxes[J] for gt, (_, J) in zip(batch_gt_instances, indices)\n        ]\n\n        pred_bboxes_list = []\n        pred_bboxes_norm_list = []\n        for idx in range(len(batch_gt_instances)):\n            pred_bboxes_list.append(pred_boxes[idx, indices[idx][0]])\n            image_size = batch_gt_instances[idx].image_size\n            pred_bboxes_norm_list.append(pred_boxes[idx, indices[idx][0]] /\n                                         image_size)\n\n        pred_boxes_cat = torch.cat(pred_bboxes_list)\n        pred_boxes_norm_cat = torch.cat(pred_bboxes_norm_list)\n        target_bboxes_cat = torch.cat(target_bboxes_list)\n        target_bboxes_norm_cat = torch.cat(target_bboxes_norm_list)\n\n        if len(pred_boxes_cat) > 0:\n            num_instances = pred_boxes_cat.shape[0]\n\n            loss_bbox = self.loss_bbox(\n                pred_boxes_norm_cat,\n                bbox_cxcywh_to_xyxy(target_bboxes_norm_cat)) / num_instances\n            loss_giou = self.loss_giou(pred_boxes_cat,\n                                       target_bboxes_cat) / num_instances\n        else:\n            loss_bbox = pred_boxes.sum() * 0\n            loss_giou = pred_boxes.sum() * 0\n        return loss_bbox, loss_giou\n\n\n@TASK_UTILS.register_module()\nclass DiffusionDetMatcher(nn.Module):\n    \"\"\"This class computes an assignment between the targets and the\n    predictions of the network For efficiency reasons, the targets don't\n    include the no_object.\n\n    Because of this, in general, there are more predictions than targets. 
In\n    this case, we do a 1-to-k (dynamic) matching of the best predictions, while\n    the others are un-matched (and thus treated as non-objects).\n    \"\"\"\n\n    def __init__(self,\n                 match_costs: Union[List[Union[dict, ConfigDict]], dict,\n                                    ConfigDict],\n                 center_radius: float = 2.5,\n                 candidate_topk: int = 5,\n                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D'),\n                 **kwargs):\n        super().__init__()\n\n        self.center_radius = center_radius\n        self.candidate_topk = candidate_topk\n\n        if isinstance(match_costs, dict):\n            match_costs = [match_costs]\n        elif isinstance(match_costs, list):\n            assert len(match_costs) > 0, \\\n                'match_costs must not be a empty list.'\n        self.use_focal_loss = False\n        self.use_fed_loss = False\n        for _match_cost in match_costs:\n            if _match_cost.get('type') == 'FocalLossCost':\n                self.use_focal_loss = True\n            if _match_cost.get('type') == 'FedLoss':\n                self.use_fed_loss = True\n                raise NotImplementedError\n\n        self.match_costs = [\n            TASK_UTILS.build(match_cost) for match_cost in match_costs\n        ]\n        self.iou_calculator = TASK_UTILS.build(iou_calculator)\n\n    def forward(self, outputs, batch_gt_instances, batch_img_metas):\n        assert 'pred_logits' in outputs and 'pred_boxes' in outputs\n\n        pred_logits = outputs['pred_logits']\n        pred_bboxes = outputs['pred_boxes']\n        batch_size = len(batch_gt_instances)\n\n        assert batch_size == pred_logits.shape[0] == pred_bboxes.shape[0]\n        batch_indices = []\n        for i in range(batch_size):\n            pred_instances = InstanceData()\n            pred_instances.bboxes = pred_bboxes[i, ...]\n            pred_instances.scores = pred_logits[i, ...]\n            gt_instances = batch_gt_instances[i]\n            img_meta = batch_img_metas[i]\n            indices = self.single_assigner(pred_instances, gt_instances,\n                                           img_meta)\n            batch_indices.append(indices)\n        return batch_indices\n\n    def single_assigner(self, pred_instances, gt_instances, img_meta):\n        with torch.no_grad():\n            gt_bboxes = gt_instances.bboxes\n            pred_bboxes = pred_instances.bboxes\n            num_gt = gt_bboxes.size(0)\n\n            if num_gt == 0:  # empty object in key frame\n                valid_mask = pred_bboxes.new_zeros((pred_bboxes.shape[0], ),\n                                                   dtype=torch.bool)\n                matched_gt_inds = pred_bboxes.new_zeros((gt_bboxes.shape[0], ),\n                                                        dtype=torch.long)\n                return valid_mask, matched_gt_inds\n\n            valid_mask, is_in_boxes_and_center = \\\n                self.get_in_gt_and_in_center_info(\n                    bbox_xyxy_to_cxcywh(pred_bboxes),\n                    bbox_xyxy_to_cxcywh(gt_bboxes)\n                )\n\n            cost_list = []\n            for match_cost in self.match_costs:\n                cost = match_cost(\n                    pred_instances=pred_instances,\n                    gt_instances=gt_instances,\n                    img_meta=img_meta)\n                cost_list.append(cost)\n\n            pairwise_ious = self.iou_calculator(pred_bboxes, gt_bboxes)\n\n            
cost_list.append((~is_in_boxes_and_center) * 100.0)\n            cost_matrix = torch.stack(cost_list).sum(0)\n            cost_matrix[~valid_mask] = cost_matrix[~valid_mask] + 10000.0\n\n            fg_mask_inboxes, matched_gt_inds = \\\n                self.dynamic_k_matching(\n                    cost_matrix, pairwise_ious, num_gt)\n        return fg_mask_inboxes, matched_gt_inds\n\n    def get_in_gt_and_in_center_info(\n            self, pred_bboxes: Tensor,\n            gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]:\n        \"\"\"Get the information of which prior is in gt bboxes and gt center\n        priors.\"\"\"\n        xy_target_gts = bbox_cxcywh_to_xyxy(gt_bboxes)  # (x1, y1, x2, y2)\n\n        pred_bboxes_center_x = pred_bboxes[:, 0].unsqueeze(1)\n        pred_bboxes_center_y = pred_bboxes[:, 1].unsqueeze(1)\n\n        # whether the center of each anchor is inside a gt box\n        b_l = pred_bboxes_center_x > xy_target_gts[:, 0].unsqueeze(0)\n        b_r = pred_bboxes_center_x < xy_target_gts[:, 2].unsqueeze(0)\n        b_t = pred_bboxes_center_y > xy_target_gts[:, 1].unsqueeze(0)\n        b_b = pred_bboxes_center_y < xy_target_gts[:, 3].unsqueeze(0)\n        # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,\n        is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() +\n                        b_b.long()) == 4)\n        is_in_boxes_all = is_in_boxes.sum(1) > 0  # [num_query]\n        # in fixed center\n        center_radius = 2.5\n        # Modified to self-adapted sampling --- the center size depends\n        # on the size of the gt boxes\n        # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212    # noqa\n        b_l = pred_bboxes_center_x > (\n            gt_bboxes[:, 0] -\n            (center_radius *\n             (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n        b_r = pred_bboxes_center_x < (\n            gt_bboxes[:, 0] +\n            (center_radius *\n             (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n        b_t = pred_bboxes_center_y > (\n            gt_bboxes[:, 1] -\n            (center_radius *\n             (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n        b_b = pred_bboxes_center_y < (\n            gt_bboxes[:, 1] +\n            (center_radius *\n             (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n\n        is_in_centers = ((b_l.long() + b_r.long() + b_t.long() +\n                          b_b.long()) == 4)\n        is_in_centers_all = is_in_centers.sum(1) > 0\n\n        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all\n        is_in_boxes_and_center = (is_in_boxes & is_in_centers)\n\n        return is_in_boxes_anchor, is_in_boxes_and_center\n\n    def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor,\n                           num_gt: int) -> Tuple[Tensor, Tensor]:\n        \"\"\"Use IoU and matching cost to calculate the dynamic top-k positive\n        targets.\"\"\"\n        matching_matrix = torch.zeros_like(cost)\n        # select candidate topk ious for dynamic-k calculation\n        candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))\n        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)\n        # calculate dynamic k for each gt\n        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(\n                cost[:, gt_idx], k=dynamic_ks[gt_idx], 
largest=False)\n            matching_matrix[:, gt_idx][pos_idx] = 1\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        prior_match_gt_mask = matching_matrix.sum(1) > 1\n        if prior_match_gt_mask.sum() > 0:\n            _, cost_argmin = torch.min(cost[prior_match_gt_mask, :], dim=1)\n            matching_matrix[prior_match_gt_mask, :] *= 0\n            matching_matrix[prior_match_gt_mask, cost_argmin] = 1\n\n        while (matching_matrix.sum(0) == 0).any():\n            matched_query_id = matching_matrix.sum(1) > 0\n            cost[matched_query_id] += 100000.0\n            unmatch_id = torch.nonzero(\n                matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)\n            for gt_idx in unmatch_id:\n                pos_idx = torch.argmin(cost[:, gt_idx])\n                matching_matrix[:, gt_idx][pos_idx] = 1.0\n            if (matching_matrix.sum(1) > 1).sum() > 0:\n                _, cost_argmin = torch.min(cost[prior_match_gt_mask], dim=1)\n                matching_matrix[prior_match_gt_mask] *= 0\n                matching_matrix[prior_match_gt_mask, cost_argmin, ] = 1\n\n        assert not (matching_matrix.sum(0) == 0).any()\n        # get foreground mask inside box and center prior\n        fg_mask_inboxes = matching_matrix.sum(1) > 0\n        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)\n\n        return fg_mask_inboxes, matched_gt_inds\n"
  },
  {
    "path": "projects/DiffusionDet/model_converters/diffusiondet_resnet_to_mmdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nfrom mmengine.fileio import load\n\n\ndef convert(src, dst):\n    if src.endswith('pth'):\n        src_model = torch.load(src)\n    else:\n        src_model = load(src)\n\n    dst_state_dict = OrderedDict()\n    for k, v in src_model['model'].items():\n        key_name_split = k.split('.')\n        if 'backbone.fpn_lateral' in k:\n            lateral_id = int(key_name_split[-2][-1])\n            name = f'neck.lateral_convs.{lateral_id - 2}.' \\\n                   f'conv.{key_name_split[-1]}'\n        elif 'backbone.fpn_output' in k:\n            lateral_id = int(key_name_split[-2][-1])\n            name = f'neck.fpn_convs.{lateral_id - 2}.conv.' \\\n                   f'{key_name_split[-1]}'\n        elif 'backbone.bottom_up.stem.conv1.norm.' in k:\n            name = f'backbone.bn1.{key_name_split[-1]}'\n        elif 'backbone.bottom_up.stem.conv1.' in k:\n            name = f'backbone.conv1.{key_name_split[-1]}'\n        elif 'backbone.bottom_up.res' in k:\n            # weight_type = key_name_split[-1]\n            res_id = int(key_name_split[2][-1]) - 1\n            # deal with short cut\n            if 'shortcut' in key_name_split[4]:\n                if 'shortcut' == key_name_split[-2]:\n                    name = f'backbone.layer{res_id}.' \\\n                           f'{key_name_split[3]}.downsample.0.' \\\n                           f'{key_name_split[-1]}'\n                elif 'shortcut' == key_name_split[-3]:\n                    name = f'backbone.layer{res_id}.' \\\n                           f'{key_name_split[3]}.downsample.1.' \\\n                           f'{key_name_split[-1]}'\n                else:\n                    print(f'Unvalid key {k}')\n            # deal with conv\n            elif 'conv' in key_name_split[-2]:\n                conv_id = int(key_name_split[-2][-1])\n                name = f'backbone.layer{res_id}.{key_name_split[3]}' \\\n                       f'.conv{conv_id}.{key_name_split[-1]}'\n            # deal with BN\n            elif key_name_split[-2] == 'norm':\n                conv_id = int(key_name_split[-3][-1])\n                name = f'backbone.layer{res_id}.{key_name_split[3]}.' \\\n                       f'bn{conv_id}.{key_name_split[-1]}'\n            else:\n                print(f'{k} is invalid')\n\n        elif key_name_split[0] == 'head':\n            # d2: head.xxx -> mmdet: bbox_head.xxx\n            name = f'bbox_{k}'\n        else:\n            # some base parameters such as beta will not convert\n            print(f'{k} is not converted!!')\n            continue\n\n        if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):\n            raise ValueError(\n                'Unsupported type found in checkpoint! {}: {}'.format(\n                    k, type(v)))\n        if not isinstance(v, torch.Tensor):\n            dst_state_dict[name] = torch.from_numpy(v)\n        else:\n            dst_state_dict[name] = v\n    mmdet_model = dict(state_dict=dst_state_dict, meta=dict())\n    torch.save(mmdet_model, dst)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Convert model keys')\n    parser.add_argument('src', help='src detectron model path')\n    parser.add_argument('dst', help='save path')\n    args = parser.parse_args()\n    convert(args.src, args.dst)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "projects/EfficientDet/README.md",
    "content": "# EfficientDet\n\n> [**EfficientDet: Scalable and Efficient Object Detection**](https://arxiv.org/pdf/1911.09070.pdf),\n> Mingxing Tan, Ruoming Pang, Quoc V. Le,\n> *CVPR 2020*\n\n## Abstract\n\nThis is an implementation of [EfficientDet](https://github.com/google/automl) based on [MMDetection](https://github.com/open-mmlab/mmdetection/tree/3.x), [MMCV](https://github.com/open-mmlab/mmcv), and [MMEngine](https://github.com/open-mmlab/mmengine).\n<br>\nEfficientDet a new family of object detectors, which consistently achieve much better efficiency than prior art across a wide\nspectrum of resource constraints.\nIn particular, with single model and single-scale, EfficientDet-D7 achieves stateof-the-art 55.1 AP on COCO test-dev with 77M parameters and 410B FLOP.\n<br>\nBiFPN is a simple yet highly effective weighted bi-directional feature pyramid network, which introduces learnable weights to learn the importance of different input features, while repeatedly applying topdown and bottom-up multi-scale feature fusion.\n<br>\nIn contrast to other feature pyramid network, such as FPN, FPN + PAN, NAS-FPN, BiFPN achieves  the best accuracy with fewer parameters and FLOPs.\n\n<div align=\"center\">\n<img src=\"https://github.com/zwhus/pictures/raw/main/Screenshot%20from%202023-01-31%2010-38-51.png\">\n</div>\n\n## Usage\n\n### Model conversion\n\nFirstly, download EfficientDet [weights](https://github.com/google/automl/tree/master/efficientdet) and unzip,  please use the following command\n\n```bash\ntar -xzvf {EFFICIENTDET_WEIGHT}\n```\n\nThen, install tensorflow, please use the following command\n\n```bash\npip install tensorflow-gpu==2.6.0\n```\n\nLastly, convert weights from tensorflow to pytorch, please use the following command\n\n```bash\npython projects/EfficientDet/convert_tf_to_pt.py --backbone {BACKBONE_NAME} --tensorflow_weight {TENSORFLOW_WEIGHT_PATH} --out_weight {OUT_PATH}\n```\n\n### Testing commands\n\nIn MMDetection's root directory, run the following command to test the model:\n\n```bash\npython tools/test.py projects/EfficientDet/configs/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco.py ${CHECKPOINT_PATH}\n```\n\n## Results\n\nBased on mmdetection, this project aligns the test accuracy of the [official model](https://github.com/google/automl).\n<br>\nIf you want to reproduce the test results, you need to convert model weights first, then run the test command.\n<br>\nThe training accuracy will also be aligned with the official in the future\n\n|                                      Method                                      |    Backbone     | Pretrained Model |  Training set  |   Test set   | Epoch | Val Box AP | Official AP |\n| :------------------------------------------------------------------------------: | :-------------: | :--------------: | :------------: | :----------: | :---: | :--------: | :---------: |\n| [efficientdet-d0](./configs/efficientdet_effb0_bifpn_8xb16-crop512-300e_coco.py) | efficientnet-b0 |     ImageNet     | COCO2017 Train | COCO2017 Val |  300  |    34.4    |    34.3     |\n\n## Citation\n\n```BibTeX\n@inproceedings{tan2020efficientdet,\n  title={Efficientdet: Scalable and efficient object detection},\n  author={Tan, Mingxing and Pang, Ruoming and Le, Quoc V},\n  booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},\n  pages={10781--10790},\n  year={2020}\n}\n```\n\n## Checklist\n\n<!-- Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an 
overview of this project's progress. The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.\nOpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone.\nNote that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.\nA project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->\n\n- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.\n\n  - [x] Finish the code\n\n    <!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmdet.registry.MODELS` and configurable via a config file. -->\n\n  - [x] Basic docstrings & proper citation\n\n    <!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->\n\n  - [x] Test-time correctness\n\n    <!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->\n\n  - [x] A full README\n\n    <!-- As this template does. -->\n\n- [ ] Milestone 2: Indicates a successful model implementation.\n\n  - [ ] Training-time correctness\n\n    <!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->\n\n- [ ] Milestone 3: Good to be a part of our core package!\n\n  - [ ] Type hints and docstrings\n\n    <!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/mmdet/datasets/transforms/transforms.py#L41-L169) -->\n\n  - [ ] Unit tests\n\n    <!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/tests/test_datasets/test_transforms/test_transforms.py#L35-L88) -->\n\n  - [ ] Code polishing\n\n    <!-- Refactor your code according to reviewer's comment. 
-->\n\n  - [ ] Metafile.yml\n\n    <!-- It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/metafile.yml) -->\n\n- [ ] Move your modules into the core package following the codebase's file hierarchy structure.\n\n  <!-- In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md) -->\n\n- [ ] Refactor your modules into the core package following the codebase's file hierarchy structure.\n"
  },
  {
    "path": "projects/EfficientDet/configs/efficientdet_effb0_bifpn_16xb8-crop512-300e_coco.py",
    "content": "_base_ = [\n    'mmdet::_base_/datasets/coco_detection.py',\n    'mmdet::_base_/schedules/schedule_1x.py',\n    'mmdet::_base_/default_runtime.py'\n]\ncustom_imports = dict(\n    imports=['projects.EfficientDet.efficientdet'], allow_failed_imports=False)\n\nimage_size = 512\ndataset_type = 'Coco90Dataset'\nevalute_type = 'Coco90Metric'\nbatch_augments = [\n    dict(type='BatchFixedSizePad', size=(image_size, image_size))\n]\nnorm_cfg = dict(type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01)\ncheckpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth'  # noqa\nmodel = dict(\n    type='EfficientDet',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_size_divisor=image_size,\n        batch_augments=batch_augments),\n    backbone=dict(\n        type='EfficientNet',\n        arch='b0',\n        drop_path_rate=0.2,\n        out_indices=(3, 4, 5),\n        frozen_stages=0,\n        norm_cfg=norm_cfg,\n        norm_eval=False,\n        init_cfg=dict(\n            type='Pretrained', prefix='backbone', checkpoint=checkpoint)),\n    neck=dict(\n        type='BiFPN',\n        num_stages=3,\n        in_channels=[40, 112, 320],\n        out_channels=64,\n        start_level=0,\n        norm_cfg=norm_cfg),\n    bbox_head=dict(\n        type='EfficientDetSepBNHead',\n        num_classes=90,\n        num_ins=5,\n        in_channels=64,\n        feat_channels=64,\n        stacked_convs=3,\n        norm_cfg=norm_cfg,\n        anchor_generator=dict(\n            type='YXYXAnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[1.0, 0.5, 2.0],\n            strides=[8, 16, 32, 64, 128],\n            center_offset=0.5),\n        bbox_coder=dict(\n            type='YXYXDeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),\n    # training and testing settings\n    train_cfg=dict(\n        assigner=dict(\n            type='TransMaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n            min_pos_iou=0,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='PseudoSampler'),  # Focal loss should use PseudoSampler\n        allowed_border=-1,\n        pos_weight=-1,\n        debug=False),\n    test_cfg=dict(\n        nms_pre=1000,\n        min_bbox_size=0,\n        score_thr=0.05,\n        nms=dict(\n            type='soft_nms',\n            iou_threshold=0.3,\n            sigma=0.5,\n            min_score=1e-3,\n            method='gaussian'),\n        max_per_img=100))\n\n# dataset settings\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='RandomResize',\n        scale=(image_size, image_size),\n        ratio_range=(0.1, 2.0),\n        keep_ratio=True),\n    dict(type='RandomCrop', crop_size=(image_size, image_size)),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\ntest_pipeline = [\n    dict(\n        
type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}}),\n    dict(type='Resize', scale=(image_size, image_size), keep_ratio=True),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=16,\n    num_workers=16,\n    dataset=dict(type=dataset_type, pipeline=train_pipeline))\nval_dataloader = dict(dataset=dict(type=dataset_type, pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(type=evalute_type)\ntest_evaluator = val_evaluator\n\noptim_wrapper = dict(\n    optimizer=dict(lr=0.16),\n    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))\n\n# learning policy\nmax_epochs = 300\nparam_scheduler = [\n    dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=917),\n    dict(\n        type='CosineAnnealingLR',\n        eta_min=0.0016,\n        begin=1,\n        T_max=284,\n        end=285,\n        by_epoch=True,\n        convert_to_iter_based=True)\n]\ntrain_cfg = dict(max_epochs=max_epochs, val_interval=1)\n\nvis_backends = [\n    dict(type='LocalVisBackend'),\n    dict(type='TensorboardVisBackend')\n]\nvisualizer = dict(\n    type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')\n\ndefault_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=15))\n# cudnn_benchmark=True can accelerate fixed-size training\nenv_cfg = dict(cudnn_benchmark=True)\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (16 samples per GPU)\nauto_scale_lr = dict(base_batch_size=128)\n"
  },
  {
    "path": "projects/EfficientDet/convert_tf_to_pt.py",
    "content": "import argparse\n\nimport numpy as np\nimport torch\nfrom tensorflow.python.training import py_checkpoint_reader\n\ntorch.set_printoptions(precision=20)\n\n\ndef tf2pth(v):\n    if v.ndim == 4:\n        return np.ascontiguousarray(v.transpose(3, 2, 0, 1))\n    elif v.ndim == 2:\n        return np.ascontiguousarray(v.transpose())\n    return v\n\n\ndef convert_key(model_name, bifpn_repeats, weights):\n\n    p6_w1 = [\n        torch.tensor([-1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p5_w1 = [\n        torch.tensor([-1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p4_w1 = [\n        torch.tensor([-1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p3_w1 = [\n        torch.tensor([-1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p4_w2 = [\n        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p5_w2 = [\n        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p6_w2 = [\n        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    p7_w2 = [\n        torch.tensor([-1e4, -1e4], dtype=torch.float64)\n        for _ in range(bifpn_repeats)\n    ]\n    idx2key = {\n        0: '1.0',\n        1: '2.0',\n        2: '2.1',\n        3: '3.0',\n        4: '3.1',\n        5: '4.0',\n        6: '4.1',\n        7: '4.2',\n        8: '4.3',\n        9: '4.4',\n        10: '4.5',\n        11: '5.0',\n        12: '5.1',\n        13: '5.2',\n        14: '5.3',\n        15: '5.4'\n    }\n    m = dict()\n    for k, v in weights.items():\n\n        if 'Exponential' in k or 'global_step' in k:\n            continue\n\n        seg = k.split('/')\n        if len(seg) == 1:\n            continue\n        if seg[2] == 'depthwise_conv2d':\n            v = v.transpose(1, 0)\n\n        if seg[0] == model_name:\n            if seg[1] == 'stem':\n                prefix = 'backbone.layers.0'\n                mapping = {\n                    'conv2d/kernel': 'conv.weight',\n                    'tpu_batch_normalization/beta': 'bn.bias',\n                    'tpu_batch_normalization/gamma': 'bn.weight',\n                    'tpu_batch_normalization/moving_mean': 'bn.running_mean',\n                    'tpu_batch_normalization/moving_variance':\n                    'bn.running_var',\n                }\n                suffix = mapping['/'.join(seg[2:])]\n                m[prefix + '.' 
+ suffix] = v\n\n            elif seg[1].startswith('blocks_'):\n                idx = int(seg[1][7:])\n                prefix = '.'.join(['backbone', 'layers', idx2key[idx]])\n                base_mapping = {\n                    'depthwise_conv2d/depthwise_kernel':\n                    'depthwise_conv.conv.weight',\n                    'se/conv2d/kernel': 'se.conv1.conv.weight',\n                    'se/conv2d/bias': 'se.conv1.conv.bias',\n                    'se/conv2d_1/kernel': 'se.conv2.conv.weight',\n                    'se/conv2d_1/bias': 'se.conv2.conv.bias'\n                }\n                if idx == 0:\n                    mapping = {\n                        'conv2d/kernel':\n                        'linear_conv.conv.weight',\n                        'tpu_batch_normalization/beta':\n                        'depthwise_conv.bn.bias',\n                        'tpu_batch_normalization/gamma':\n                        'depthwise_conv.bn.weight',\n                        'tpu_batch_normalization/moving_mean':\n                        'depthwise_conv.bn.running_mean',\n                        'tpu_batch_normalization/moving_variance':\n                        'depthwise_conv.bn.running_var',\n                        'tpu_batch_normalization_1/beta':\n                        'linear_conv.bn.bias',\n                        'tpu_batch_normalization_1/gamma':\n                        'linear_conv.bn.weight',\n                        'tpu_batch_normalization_1/moving_mean':\n                        'linear_conv.bn.running_mean',\n                        'tpu_batch_normalization_1/moving_variance':\n                        'linear_conv.bn.running_var',\n                    }\n                else:\n                    mapping = {\n                        'depthwise_conv2d/depthwise_kernel':\n                        'depthwise_conv.conv.weight',\n                        'conv2d/kernel':\n                        'expand_conv.conv.weight',\n                        'conv2d_1/kernel':\n                        'linear_conv.conv.weight',\n                        'tpu_batch_normalization/beta':\n                        'expand_conv.bn.bias',\n                        'tpu_batch_normalization/gamma':\n                        'expand_conv.bn.weight',\n                        'tpu_batch_normalization/moving_mean':\n                        'expand_conv.bn.running_mean',\n                        'tpu_batch_normalization/moving_variance':\n                        'expand_conv.bn.running_var',\n                        'tpu_batch_normalization_1/beta':\n                        'depthwise_conv.bn.bias',\n                        'tpu_batch_normalization_1/gamma':\n                        'depthwise_conv.bn.weight',\n                        'tpu_batch_normalization_1/moving_mean':\n                        'depthwise_conv.bn.running_mean',\n                        'tpu_batch_normalization_1/moving_variance':\n                        'depthwise_conv.bn.running_var',\n                        'tpu_batch_normalization_2/beta':\n                        'linear_conv.bn.bias',\n                        'tpu_batch_normalization_2/gamma':\n                        'linear_conv.bn.weight',\n                        'tpu_batch_normalization_2/moving_mean':\n                        'linear_conv.bn.running_mean',\n                        'tpu_batch_normalization_2/moving_variance':\n                        'linear_conv.bn.running_var',\n                    }\n                mapping.update(base_mapping)\n                
suffix = mapping['/'.join(seg[2:])]\n                m[prefix + '.' + suffix] = v\n        elif seg[0] == 'resample_p6':\n            prefix = 'neck.bifpn.0.p5_to_p6.0'\n            mapping = {\n                'conv2d/kernel': 'down_conv.conv.weight',\n                'conv2d/bias': 'down_conv.conv.bias',\n                'bn/beta': 'bn.bias',\n                'bn/gamma': 'bn.weight',\n                'bn/moving_mean': 'bn.running_mean',\n                'bn/moving_variance': 'bn.running_var',\n            }\n            suffix = mapping['/'.join(seg[1:])]\n            m[prefix + '.' + suffix] = v\n        elif seg[0] == 'fpn_cells':\n            fpn_idx = int(seg[1][5:])\n            prefix = '.'.join(['neck', 'bifpn', str(fpn_idx)])\n            fnode_id = int(seg[2][5])\n            if fnode_id == 0:\n                mapping = {\n                    'op_after_combine5/conv/depthwise_kernel':\n                    'conv6_up.depthwise_conv.conv.weight',\n                    'op_after_combine5/conv/pointwise_kernel':\n                    'conv6_up.pointwise_conv.conv.weight',\n                    'op_after_combine5/conv/bias':\n                    'conv6_up.pointwise_conv.conv.bias',\n                    'op_after_combine5/bn/beta':\n                    'conv6_up.bn.bias',\n                    'op_after_combine5/bn/gamma':\n                    'conv6_up.bn.weight',\n                    'op_after_combine5/bn/moving_mean':\n                    'conv6_up.bn.running_mean',\n                    'op_after_combine5/bn/moving_variance':\n                    'conv6_up.bn.running_var',\n                }\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1':\n                    suffix = mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p6_w1[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p6_w1[fpn_idx][1] = v\n                if torch.min(p6_w1[fpn_idx]) > -1e4:\n                    m[prefix + '.p6_w1'] = p6_w1[fpn_idx]\n            elif fnode_id == 1:\n                base_mapping = {\n                    'op_after_combine6/conv/depthwise_kernel':\n                    'conv5_up.depthwise_conv.conv.weight',\n                    'op_after_combine6/conv/pointwise_kernel':\n                    'conv5_up.pointwise_conv.conv.weight',\n                    'op_after_combine6/conv/bias':\n                    'conv5_up.pointwise_conv.conv.bias',\n                    'op_after_combine6/bn/beta':\n                    'conv5_up.bn.bias',\n                    'op_after_combine6/bn/gamma':\n                    'conv5_up.bn.weight',\n                    'op_after_combine6/bn/moving_mean':\n                    'conv5_up.bn.running_mean',\n                    'op_after_combine6/bn/moving_variance':\n                    'conv5_up.bn.running_var',\n                }\n                if fpn_idx == 0:\n                    mapping = {\n                        'resample_0_2_6/conv2d/kernel':\n                        'p5_down_channel.down_conv.conv.weight',\n                        'resample_0_2_6/conv2d/bias':\n                        'p5_down_channel.down_conv.conv.bias',\n                        'resample_0_2_6/bn/beta':\n                        'p5_down_channel.bn.bias',\n                        'resample_0_2_6/bn/gamma':\n                        'p5_down_channel.bn.weight',\n                        'resample_0_2_6/bn/moving_mean':\n                        'p5_down_channel.bn.running_mean',\n                        'resample_0_2_6/bn/moving_variance':\n                        'p5_down_channel.bn.running_var',\n                    }\n                    base_mapping.update(mapping)\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p5_w1[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p5_w1[fpn_idx][1] = v\n                if torch.min(p5_w1[fpn_idx]) > -1e4:\n                    m[prefix + '.p5_w1'] = p5_w1[fpn_idx]\n            elif fnode_id == 2:\n                base_mapping = {\n                    'op_after_combine7/conv/depthwise_kernel':\n                    'conv4_up.depthwise_conv.conv.weight',\n                    'op_after_combine7/conv/pointwise_kernel':\n                    'conv4_up.pointwise_conv.conv.weight',\n                    'op_after_combine7/conv/bias':\n                    'conv4_up.pointwise_conv.conv.bias',\n                    'op_after_combine7/bn/beta':\n                    'conv4_up.bn.bias',\n                    'op_after_combine7/bn/gamma':\n                    'conv4_up.bn.weight',\n                    'op_after_combine7/bn/moving_mean':\n                    'conv4_up.bn.running_mean',\n                    'op_after_combine7/bn/moving_variance':\n                    'conv4_up.bn.running_var',\n                }\n                if fpn_idx == 0:\n                    mapping = {\n                        'resample_0_1_7/conv2d/kernel':\n                        'p4_down_channel.down_conv.conv.weight',\n                        'resample_0_1_7/conv2d/bias':\n                        'p4_down_channel.down_conv.conv.bias',\n                        'resample_0_1_7/bn/beta':\n                        'p4_down_channel.bn.bias',\n                        'resample_0_1_7/bn/gamma':\n                        'p4_down_channel.bn.weight',\n                        'resample_0_1_7/bn/moving_mean':\n                        'p4_down_channel.bn.running_mean',\n                        'resample_0_1_7/bn/moving_variance':\n                        'p4_down_channel.bn.running_var',\n                    }\n                    base_mapping.update(mapping)\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p4_w1[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p4_w1[fpn_idx][1] = v\n                if torch.min(p4_w1[fpn_idx]) > -1e4:\n                    m[prefix + '.p4_w1'] = p4_w1[fpn_idx]\n            elif fnode_id == 3:\n\n                base_mapping = {\n                    'op_after_combine8/conv/depthwise_kernel':\n                    'conv3_up.depthwise_conv.conv.weight',\n                    'op_after_combine8/conv/pointwise_kernel':\n                    'conv3_up.pointwise_conv.conv.weight',\n                    'op_after_combine8/conv/bias':\n                    'conv3_up.pointwise_conv.conv.bias',\n                    'op_after_combine8/bn/beta':\n                    'conv3_up.bn.bias',\n                    'op_after_combine8/bn/gamma':\n                    'conv3_up.bn.weight',\n                    'op_after_combine8/bn/moving_mean':\n                    'conv3_up.bn.running_mean',\n                    'op_after_combine8/bn/moving_variance':\n                    'conv3_up.bn.running_var',\n                }\n                if fpn_idx == 0:\n                    mapping = {\n                        'resample_0_0_8/conv2d/kernel':\n                        'p3_down_channel.down_conv.conv.weight',\n                        'resample_0_0_8/conv2d/bias':\n                        'p3_down_channel.down_conv.conv.bias',\n                        'resample_0_0_8/bn/beta':\n                        'p3_down_channel.bn.bias',\n                        'resample_0_0_8/bn/gamma':\n                        'p3_down_channel.bn.weight',\n                        'resample_0_0_8/bn/moving_mean':\n                        'p3_down_channel.bn.running_mean',\n                        'resample_0_0_8/bn/moving_variance':\n                        'p3_down_channel.bn.running_var',\n                    }\n                    base_mapping.update(mapping)\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p3_w1[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p3_w1[fpn_idx][1] = v\n                if torch.min(p3_w1[fpn_idx]) > -1e4:\n                    m[prefix + '.p3_w1'] = p3_w1[fpn_idx]\n            elif fnode_id == 4:\n                base_mapping = {\n                    'op_after_combine9/conv/depthwise_kernel':\n                    'conv4_down.depthwise_conv.conv.weight',\n                    'op_after_combine9/conv/pointwise_kernel':\n                    'conv4_down.pointwise_conv.conv.weight',\n                    'op_after_combine9/conv/bias':\n                    'conv4_down.pointwise_conv.conv.bias',\n                    'op_after_combine9/bn/beta':\n                    'conv4_down.bn.bias',\n                    'op_after_combine9/bn/gamma':\n                    'conv4_down.bn.weight',\n                    'op_after_combine9/bn/moving_mean':\n                    'conv4_down.bn.running_mean',\n                    'op_after_combine9/bn/moving_variance':\n                    'conv4_down.bn.running_var',\n                }\n                if fpn_idx == 0:\n                    mapping = {\n                        'resample_0_1_9/conv2d/kernel':\n                        'p4_level_connection.down_conv.conv.weight',\n                        'resample_0_1_9/conv2d/bias':\n                        'p4_level_connection.down_conv.conv.bias',\n                        'resample_0_1_9/bn/beta':\n                        'p4_level_connection.bn.bias',\n                        'resample_0_1_9/bn/gamma':\n                        'p4_level_connection.bn.weight',\n                        'resample_0_1_9/bn/moving_mean':\n                        'p4_level_connection.bn.running_mean',\n                        'resample_0_1_9/bn/moving_variance':\n                        'p4_level_connection.bn.running_var',\n                    }\n                    base_mapping.update(mapping)\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p4_w2[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p4_w2[fpn_idx][1] = v\n                elif seg[3] == 'WSM_2':\n                    p4_w2[fpn_idx][2] = v\n                if torch.min(p4_w2[fpn_idx]) > -1e4:\n                    m[prefix + '.p4_w2'] = p4_w2[fpn_idx]\n            elif fnode_id == 5:\n                base_mapping = {\n                    'op_after_combine10/conv/depthwise_kernel':\n                    'conv5_down.depthwise_conv.conv.weight',\n                    'op_after_combine10/conv/pointwise_kernel':\n                    'conv5_down.pointwise_conv.conv.weight',\n                    'op_after_combine10/conv/bias':\n                    'conv5_down.pointwise_conv.conv.bias',\n                    'op_after_combine10/bn/beta':\n                    'conv5_down.bn.bias',\n                    'op_after_combine10/bn/gamma':\n                    'conv5_down.bn.weight',\n                    'op_after_combine10/bn/moving_mean':\n                    'conv5_down.bn.running_mean',\n                    'op_after_combine10/bn/moving_variance':\n                    'conv5_down.bn.running_var',\n                }\n                if fpn_idx == 0:\n                    mapping = {\n                        'resample_0_2_10/conv2d/kernel':\n                        'p5_level_connection.down_conv.conv.weight',\n                        'resample_0_2_10/conv2d/bias':\n                        'p5_level_connection.down_conv.conv.bias',\n                        'resample_0_2_10/bn/beta':\n                        'p5_level_connection.bn.bias',\n                        'resample_0_2_10/bn/gamma':\n                        'p5_level_connection.bn.weight',\n                        'resample_0_2_10/bn/moving_mean':\n                        'p5_level_connection.bn.running_mean',\n                        'resample_0_2_10/bn/moving_variance':\n                        'p5_level_connection.bn.running_var',\n                    }\n                    base_mapping.update(mapping)\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p5_w2[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p5_w2[fpn_idx][1] = v\n                elif seg[3] == 'WSM_2':\n                    p5_w2[fpn_idx][2] = v\n                if torch.min(p5_w2[fpn_idx]) > -1e4:\n                    m[prefix + '.p5_w2'] = p5_w2[fpn_idx]\n            elif fnode_id == 6:\n                base_mapping = {\n                    'op_after_combine11/conv/depthwise_kernel':\n                    'conv6_down.depthwise_conv.conv.weight',\n                    'op_after_combine11/conv/pointwise_kernel':\n                    'conv6_down.pointwise_conv.conv.weight',\n                    'op_after_combine11/conv/bias':\n                    'conv6_down.pointwise_conv.conv.bias',\n                    'op_after_combine11/bn/beta':\n                    'conv6_down.bn.bias',\n                    'op_after_combine11/bn/gamma':\n                    'conv6_down.bn.weight',\n                    'op_after_combine11/bn/moving_mean':\n                    'conv6_down.bn.running_mean',\n                    'op_after_combine11/bn/moving_variance':\n                    'conv6_down.bn.running_var',\n                }\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' + suffix] = v\n                elif seg[3] == 'WSM':\n                    p6_w2[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p6_w2[fpn_idx][1] = v\n                elif seg[3] == 'WSM_2':\n                    p6_w2[fpn_idx][2] = v\n                if torch.min(p6_w2[fpn_idx]) > -1e4:\n                    m[prefix + '.p6_w2'] = p6_w2[fpn_idx]\n            elif fnode_id == 7:\n                base_mapping = {\n                    'op_after_combine12/conv/depthwise_kernel':\n                    'conv7_down.depthwise_conv.conv.weight',\n                    'op_after_combine12/conv/pointwise_kernel':\n                    'conv7_down.pointwise_conv.conv.weight',\n                    'op_after_combine12/conv/bias':\n                    'conv7_down.pointwise_conv.conv.bias',\n                    'op_after_combine12/bn/beta':\n                    'conv7_down.bn.bias',\n                    'op_after_combine12/bn/gamma':\n                    'conv7_down.bn.weight',\n                    'op_after_combine12/bn/moving_mean':\n                    'conv7_down.bn.running_mean',\n                    'op_after_combine12/bn/moving_variance':\n                    'conv7_down.bn.running_var',\n                }\n                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':\n                    suffix = base_mapping['/'.join(seg[3:])]\n                    if 'depthwise_conv' in suffix:\n                        v = v.transpose(1, 0)\n                    m[prefix + '.' 
+ suffix] = v\n                elif seg[3] == 'WSM':\n                    p7_w2[fpn_idx][0] = v\n                elif seg[3] == 'WSM_1':\n                    p7_w2[fpn_idx][1] = v\n                if torch.min(p7_w2[fpn_idx]) > -1e4:\n                    m[prefix + '.p7_w2'] = p7_w2[fpn_idx]\n        elif seg[0] == 'box_net':\n            if 'box-predict' in seg[1]:\n                prefix = '.'.join(['bbox_head', 'reg_header'])\n                base_mapping = {\n                    'depthwise_kernel': 'depthwise_conv.conv.weight',\n                    'pointwise_kernel': 'pointwise_conv.conv.weight',\n                    'bias': 'pointwise_conv.conv.bias'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                if 'depthwise_conv' in suffix:\n                    v = v.transpose(1, 0)\n                m[prefix + '.' + suffix] = v\n            elif 'bn' in seg[1]:\n                bbox_conv_idx = int(seg[1][4])\n                bbox_bn_idx = int(seg[1][9]) - 3\n                prefix = '.'.join([\n                    'bbox_head', 'reg_bn_list',\n                    str(bbox_conv_idx),\n                    str(bbox_bn_idx)\n                ])\n                base_mapping = {\n                    'beta': 'bias',\n                    'gamma': 'weight',\n                    'moving_mean': 'running_mean',\n                    'moving_variance': 'running_var'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                m[prefix + '.' + suffix] = v\n            else:\n                bbox_conv_idx = int(seg[1][4])\n                prefix = '.'.join(\n                    ['bbox_head', 'reg_conv_list',\n                     str(bbox_conv_idx)])\n                base_mapping = {\n                    'depthwise_kernel': 'depthwise_conv.conv.weight',\n                    'pointwise_kernel': 'pointwise_conv.conv.weight',\n                    'bias': 'pointwise_conv.conv.bias'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                if 'depthwise_conv' in suffix:\n                    v = v.transpose(1, 0)\n                m[prefix + '.' + suffix] = v\n        elif seg[0] == 'class_net':\n            if 'class-predict' in seg[1]:\n                prefix = '.'.join(['bbox_head', 'cls_header'])\n                base_mapping = {\n                    'depthwise_kernel': 'depthwise_conv.conv.weight',\n                    'pointwise_kernel': 'pointwise_conv.conv.weight',\n                    'bias': 'pointwise_conv.conv.bias'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                if 'depthwise_conv' in suffix:\n                    v = v.transpose(1, 0)\n                m[prefix + '.' + suffix] = v\n            elif 'bn' in seg[1]:\n                cls_conv_idx = int(seg[1][6])\n                cls_bn_idx = int(seg[1][11]) - 3\n                prefix = '.'.join([\n                    'bbox_head', 'cls_bn_list',\n                    str(cls_conv_idx),\n                    str(cls_bn_idx)\n                ])\n                base_mapping = {\n                    'beta': 'bias',\n                    'gamma': 'weight',\n                    'moving_mean': 'running_mean',\n                    'moving_variance': 'running_var'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                m[prefix + '.' 
+ suffix] = v\n            else:\n                cls_conv_idx = int(seg[1][6])\n                prefix = '.'.join(\n                    ['bbox_head', 'cls_conv_list',\n                     str(cls_conv_idx)])\n                base_mapping = {\n                    'depthwise_kernel': 'depthwise_conv.conv.weight',\n                    'pointwise_kernel': 'pointwise_conv.conv.weight',\n                    'bias': 'pointwise_conv.conv.bias'\n                }\n                suffix = base_mapping['/'.join(seg[2:])]\n                if 'depthwise_conv' in suffix:\n                    v = v.transpose(1, 0)\n                m[prefix + '.' + suffix] = v\n    return m\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='convert efficientdet weight from tensorflow to pytorch')\n    parser.add_argument(\n        '--backbone',\n        type=str,\n        help='efficientnet model name, like efficientnet-b0')\n    parser.add_argument(\n        '--tensorflow_weight',\n        type=str,\n        help='efficientdet tensorflow weight name, like efficientdet-d0/model')\n    parser.add_argument(\n        '--out_weight',\n        type=str,\n        help='efficientdet pytorch weight name like demo.pth')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    model_name = args.backbone\n    ori_weight_name = args.tensorflow_weight\n    out_name = args.out_weight\n\n    repeat_map = {\n        0: 3,\n        1: 4,\n        2: 5,\n        3: 6,\n        4: 7,\n        5: 7,\n        6: 8,\n        7: 8,\n    }\n\n    reader = py_checkpoint_reader.NewCheckpointReader(ori_weight_name)\n    weights = {\n        n: torch.as_tensor(tf2pth(reader.get_tensor(n)))\n        for (n, _) in reader.get_variable_to_shape_map().items()\n    }\n    print(weights.keys())\n    bifpn_repeats = repeat_map[int(model_name[14])]\n    out = convert_key(model_name, bifpn_repeats, weights)\n    result = {'state_dict': out}\n    torch.save(result, out_name)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/__init__.py",
    "content": "from .anchor_generator import YXYXAnchorGenerator\nfrom .bifpn import BiFPN\nfrom .coco_90class import Coco90Dataset\nfrom .coco_90metric import Coco90Metric\nfrom .efficientdet import EfficientDet\nfrom .efficientdet_head import EfficientDetSepBNHead\nfrom .trans_max_iou_assigner import TransMaxIoUAssigner\nfrom .yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder\n\n__all__ = [\n    'EfficientDet', 'BiFPN', 'EfficientDetSepBNHead', 'YXYXAnchorGenerator',\n    'YXYXDeltaXYWHBBoxCoder', 'Coco90Dataset', 'Coco90Metric',\n    'TransMaxIoUAssigner'\n]\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/anchor_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom mmdet.models.task_modules.prior_generators.anchor_generator import \\\n    AnchorGenerator\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes\n\nDeviceType = Union[str, torch.device]\n\n\n@TASK_UTILS.register_module()\nclass YXYXAnchorGenerator(AnchorGenerator):\n\n    def gen_single_level_base_anchors(self,\n                                      base_size: Union[int, float],\n                                      scales: Tensor,\n                                      ratios: Tensor,\n                                      center: Optional[Tuple[float]] = None) \\\n            -> Tensor:\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature maps.\n        \"\"\"\n\n        w = base_size\n        h = base_size\n        if center is None:\n            x_center = self.center_offset * w\n            y_center = self.center_offset * h\n        else:\n            x_center, y_center = center\n\n        h_ratios = torch.sqrt(ratios)\n        w_ratios = 1 / h_ratios\n        if self.scale_major:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n        else:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n\n        # use float anchor and the anchor's center is aligned with the\n        # pixel center\n        base_anchors = [\n            y_center - 0.5 * hs,\n            x_center - 0.5 * ws,\n            y_center + 0.5 * hs,\n            x_center + 0.5 * ws,\n        ]\n        base_anchors = torch.stack(base_anchors, dim=-1)\n\n        return base_anchors\n\n    def single_level_grid_priors(self,\n                                 featmap_size: Tuple[int, int],\n                                 level_idx: int,\n                                 dtype: torch.dtype = torch.float32,\n                                 device: DeviceType = 'cuda') -> Tensor:\n        \"\"\"Generate grid anchors of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_priors``.\n\n        Args:\n            featmap_size (tuple[int, int]): Size of the feature maps.\n            level_idx (int): The index of corresponding feature map level.\n            dtype (obj:`torch.dtype`): Date type of points.Defaults to\n                ``torch.float32``.\n            device (str | torch.device): The device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: Anchors in the overall feature maps.\n        \"\"\"\n        base_anchors = self.base_anchors[level_idx].to(device).to(dtype)\n        feat_h, feat_w = featmap_size\n        stride_w, stride_h = self.strides[level_idx]\n        # First create Range with the default dtype, than convert to\n        # target `dtype` for onnx 
exporting.\n        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w\n        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        shifts = torch.stack([shift_yy, shift_xx, shift_yy, shift_xx], dim=-1)\n        # first feat_w elements correspond to the first row of shifts\n        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n        # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n        all_anchors = all_anchors.view(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        if self.use_box_type:\n            all_anchors = HorizontalBoxes(all_anchors)\n\n        return all_anchors\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/api_wrappers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coco_api import COCO, COCOeval, COCOPanoptic\n\n__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/api_wrappers/coco_api.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# This file add snake case alias for coco api\n\nimport warnings\nfrom collections import defaultdict\nfrom typing import List, Optional, Union\n\nimport pycocotools\nfrom pycocotools.coco import COCO as _COCO\nfrom pycocotools.cocoeval import COCOeval as _COCOeval\n\n\nclass COCO(_COCO):\n    \"\"\"This class is almost the same as official pycocotools package.\n\n    It implements some snake case function aliases. So that the COCO class has\n    the same interface as LVIS class.\n    \"\"\"\n\n    def __init__(self, annotation_file=None):\n        if getattr(pycocotools, '__version__', '0') >= '12.0.2':\n            warnings.warn(\n                'mmpycocotools is deprecated. Please install official pycocotools by \"pip install pycocotools\"',  # noqa: E501\n                UserWarning)\n        super().__init__(annotation_file=annotation_file)\n        self.img_ann_map = self.imgToAnns\n        self.cat_img_map = self.catToImgs\n\n    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):\n        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)\n\n    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):\n        # return self.getCatIds(cat_names, sup_names, cat_ids)\n        cat_ids_coco = self.getCatIds(cat_names, sup_names, cat_ids)\n        if None in cat_names:\n            index = [i for i, v in enumerate(cat_names) if v is not None]\n            cat_ids = list(range(len(cat_names)))\n            for i in range(len(index)):\n                cat_ids[index[i]] = cat_ids_coco[i]\n            return cat_ids\n        else:\n            return cat_ids_coco\n\n    def get_img_ids(self, img_ids=[], cat_ids=[]):\n        return self.getImgIds(img_ids, cat_ids)\n\n    def load_anns(self, ids):\n        return self.loadAnns(ids)\n\n    def load_cats(self, ids):\n        return self.loadCats(ids)\n\n    def load_imgs(self, ids):\n        return self.loadImgs(ids)\n\n\n# just for the ease of import\nCOCOeval = _COCOeval\n\n\nclass COCOPanoptic(COCO):\n    \"\"\"This wrapper is for loading the panoptic style annotation file.\n\n    The format is shown in the CocoPanopticDataset class.\n\n    Args:\n        annotation_file (str, optional): Path of annotation file.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self, annotation_file: Optional[str] = None) -> None:\n        super(COCOPanoptic, self).__init__(annotation_file)\n\n    def createIndex(self) -> None:\n        \"\"\"Create index.\"\"\"\n        # create index\n        print('creating index...')\n        # anns stores 'segment_id -> annotation'\n        anns, cats, imgs = {}, {}, {}\n        img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)\n        if 'annotations' in self.dataset:\n            for ann in self.dataset['annotations']:\n                for seg_ann in ann['segments_info']:\n                    # to match with instance.json\n                    seg_ann['image_id'] = ann['image_id']\n                    img_to_anns[ann['image_id']].append(seg_ann)\n                    # segment_id is not unique in coco dataset orz...\n                    # annotations from different images but\n                    # may have same segment_id\n                    if seg_ann['id'] in anns.keys():\n                        anns[seg_ann['id']].append(seg_ann)\n                    else:\n                        anns[seg_ann['id']] = [seg_ann]\n\n            # filter out annotations from other images\n            
img_to_anns_ = defaultdict(list)\n            for k, v in img_to_anns.items():\n                img_to_anns_[k] = [x for x in v if x['image_id'] == k]\n            img_to_anns = img_to_anns_\n\n        if 'images' in self.dataset:\n            for img_info in self.dataset['images']:\n                img_info['segm_file'] = img_info['file_name'].replace(\n                    'jpg', 'png')\n                imgs[img_info['id']] = img_info\n\n        if 'categories' in self.dataset:\n            for cat in self.dataset['categories']:\n                cats[cat['id']] = cat\n\n        if 'annotations' in self.dataset and 'categories' in self.dataset:\n            for ann in self.dataset['annotations']:\n                for seg_ann in ann['segments_info']:\n                    cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])\n\n        print('index created!')\n\n        self.anns = anns\n        self.imgToAnns = img_to_anns\n        self.catToImgs = cat_to_imgs\n        self.imgs = imgs\n        self.cats = cats\n\n    def load_anns(self,\n                  ids: Union[List[int], int] = []) -> Optional[List[dict]]:\n        \"\"\"Load anns with the specified ids.\n\n        ``self.anns`` is a list of annotation lists instead of a\n        list of annotations.\n\n        Args:\n            ids (Union[List[int], int]): Integer ids specifying anns.\n\n        Returns:\n            anns (List[dict], optional): Loaded ann objects.\n        \"\"\"\n        anns = []\n\n        if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):\n            # self.anns is a list of annotation lists instead of\n            # a list of annotations\n            for id in ids:\n                anns += self.anns[id]\n            return anns\n        elif type(ids) == int:\n            return self.anns[ids]\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/bifpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn.bricks import Swish\nfrom mmengine.model import BaseModule\n\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import MultiConfig, OptConfigType\nfrom .utils import (DepthWiseConvBlock, DownChannelBlock, MaxPool2dSamePadding,\n                    MemoryEfficientSwish)\n\n\nclass BiFPNStage(nn.Module):\n    '''\n        in_channels: List[int], input dim for P3, P4, P5\n        out_channels: int, output dim for P2 - P7\n        first_time: int, whether is the first bifpnstage\n        num_outs: int, BiFPN need feature maps num\n        use_swish: whether use MemoryEfficientSwish\n        norm_cfg: (:obj:`ConfigDict` or dict, optional): Config dict for\n            normalization layer.\n        epsilon: float, hyperparameter in fusion features\n    '''\n\n    def __init__(self,\n                 in_channels: List[int],\n                 out_channels: int,\n                 first_time: bool = False,\n                 apply_bn_for_resampling: bool = True,\n                 conv_bn_act_pattern: bool = False,\n                 use_meswish: bool = True,\n                 norm_cfg: OptConfigType = dict(\n                     type='BN', momentum=1e-2, eps=1e-3),\n                 epsilon: float = 1e-4) -> None:\n        super().__init__()\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.first_time = first_time\n        self.apply_bn_for_resampling = apply_bn_for_resampling\n        self.conv_bn_act_pattern = conv_bn_act_pattern\n        self.use_meswish = use_meswish\n        self.norm_cfg = norm_cfg\n        self.epsilon = epsilon\n\n        if self.first_time:\n            self.p5_down_channel = DownChannelBlock(\n                self.in_channels[-1],\n                self.out_channels,\n                apply_norm=self.apply_bn_for_resampling,\n                conv_bn_act_pattern=self.conv_bn_act_pattern,\n                norm_cfg=norm_cfg)\n            self.p4_down_channel = DownChannelBlock(\n                self.in_channels[-2],\n                self.out_channels,\n                apply_norm=self.apply_bn_for_resampling,\n                conv_bn_act_pattern=self.conv_bn_act_pattern,\n                norm_cfg=norm_cfg)\n            self.p3_down_channel = DownChannelBlock(\n                self.in_channels[-3],\n                self.out_channels,\n                apply_norm=self.apply_bn_for_resampling,\n                conv_bn_act_pattern=self.conv_bn_act_pattern,\n                norm_cfg=norm_cfg)\n            self.p5_to_p6 = nn.Sequential(\n                DownChannelBlock(\n                    self.in_channels[-1],\n                    self.out_channels,\n                    apply_norm=self.apply_bn_for_resampling,\n                    conv_bn_act_pattern=self.conv_bn_act_pattern,\n                    norm_cfg=norm_cfg), MaxPool2dSamePadding(3, 2))\n            self.p6_to_p7 = MaxPool2dSamePadding(3, 2)\n            self.p4_level_connection = DownChannelBlock(\n                self.in_channels[-2],\n                self.out_channels,\n                apply_norm=self.apply_bn_for_resampling,\n                conv_bn_act_pattern=self.conv_bn_act_pattern,\n                norm_cfg=norm_cfg)\n            self.p5_level_connection = DownChannelBlock(\n                
self.in_channels[-1],\n                self.out_channels,\n                apply_norm=self.apply_bn_for_resampling,\n                conv_bn_act_pattern=self.conv_bn_act_pattern,\n                norm_cfg=norm_cfg)\n\n        self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n        self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n        self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n        self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')\n\n        # bottom to up: feature map down_sample module\n        self.p4_down_sample = MaxPool2dSamePadding(3, 2)\n        self.p5_down_sample = MaxPool2dSamePadding(3, 2)\n        self.p6_down_sample = MaxPool2dSamePadding(3, 2)\n        self.p7_down_sample = MaxPool2dSamePadding(3, 2)\n\n        # Fuse Conv Layers\n        self.conv6_up = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv5_up = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv4_up = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv3_up = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv4_down = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv5_down = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv6_down = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        self.conv7_down = DepthWiseConvBlock(\n            out_channels,\n            out_channels,\n            apply_norm=self.apply_bn_for_resampling,\n            conv_bn_act_pattern=self.conv_bn_act_pattern,\n            norm_cfg=norm_cfg)\n        # weights\n        self.p6_w1 = nn.Parameter(\n            torch.ones(2, dtype=torch.float32), requires_grad=True)\n        self.p6_w1_relu = nn.ReLU()\n        self.p5_w1 = nn.Parameter(\n            torch.ones(2, dtype=torch.float32), requires_grad=True)\n        self.p5_w1_relu = nn.ReLU()\n        self.p4_w1 = nn.Parameter(\n            torch.ones(2, dtype=torch.float32), requires_grad=True)\n        self.p4_w1_relu = nn.ReLU()\n        self.p3_w1 = nn.Parameter(\n            torch.ones(2, dtype=torch.float32), requires_grad=True)\n        self.p3_w1_relu = nn.ReLU()\n\n        self.p4_w2 = nn.Parameter(\n            torch.ones(3, dtype=torch.float32), requires_grad=True)\n        self.p4_w2_relu = nn.ReLU()\n        self.p5_w2 = nn.Parameter(\n  
          torch.ones(3, dtype=torch.float32), requires_grad=True)\n        self.p5_w2_relu = nn.ReLU()\n        self.p6_w2 = nn.Parameter(\n            torch.ones(3, dtype=torch.float32), requires_grad=True)\n        self.p6_w2_relu = nn.ReLU()\n        self.p7_w2 = nn.Parameter(\n            torch.ones(2, dtype=torch.float32), requires_grad=True)\n        self.p7_w2_relu = nn.ReLU()\n\n        self.swish = MemoryEfficientSwish() if use_meswish else Swish()\n\n    def combine(self, x):\n        if not self.conv_bn_act_pattern:\n            x = self.swish(x)\n\n        return x\n\n    def forward(self, x):\n        if self.first_time:\n            p3, p4, p5 = x\n            # build feature map P6\n            p6_in = self.p5_to_p6(p5)\n            # build feature map P7\n            p7_in = self.p6_to_p7(p6_in)\n\n            p3_in = self.p3_down_channel(p3)\n            p4_in = self.p4_down_channel(p4)\n            p5_in = self.p5_down_channel(p5)\n\n        else:\n            p3_in, p4_in, p5_in, p6_in, p7_in = x\n\n        # Weights for P6_0 and P7_0 to P6_1\n        p6_w1 = self.p6_w1_relu(self.p6_w1)\n        weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)\n        # Connections for P6_0 and P7_0 to P6_1 respectively\n        p6_up = self.conv6_up(\n            self.combine(weight[0] * p6_in +\n                         weight[1] * self.p6_upsample(p7_in)))\n\n        # Weights for P5_0 and P6_1 to P5_1\n        p5_w1 = self.p5_w1_relu(self.p5_w1)\n        weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)\n        # Connections for P5_0 and P6_1 to P5_1 respectively\n        p5_up = self.conv5_up(\n            self.combine(weight[0] * p5_in +\n                         weight[1] * self.p5_upsample(p6_up)))\n\n        # Weights for P4_0 and P5_1 to P4_1\n        p4_w1 = self.p4_w1_relu(self.p4_w1)\n        weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)\n        # Connections for P4_0 and P5_1 to P4_1 respectively\n        p4_up = self.conv4_up(\n            self.combine(weight[0] * p4_in +\n                         weight[1] * self.p4_upsample(p5_up)))\n\n        # Weights for P3_0 and P4_1 to P3_2\n        p3_w1 = self.p3_w1_relu(self.p3_w1)\n        weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)\n        # Connections for P3_0 and P4_1 to P3_2 respectively\n        p3_out = self.conv3_up(\n            self.combine(weight[0] * p3_in +\n                         weight[1] * self.p3_upsample(p4_up)))\n\n        if self.first_time:\n            p4_in = self.p4_level_connection(p4)\n            p5_in = self.p5_level_connection(p5)\n\n        # Weights for P4_0, P4_1 and P3_2 to P4_2\n        p4_w2 = self.p4_w2_relu(self.p4_w2)\n        weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)\n        # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively\n        p4_out = self.conv4_down(\n            self.combine(weight[0] * p4_in + weight[1] * p4_up +\n                         weight[2] * self.p4_down_sample(p3_out)))\n\n        # Weights for P5_0, P5_1 and P4_2 to P5_2\n        p5_w2 = self.p5_w2_relu(self.p5_w2)\n        weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)\n        # Connections for P5_0, P5_1 and P4_2 to P5_2 respectively\n        p5_out = self.conv5_down(\n            self.combine(weight[0] * p5_in + weight[1] * p5_up +\n                         weight[2] * self.p5_down_sample(p4_out)))\n\n        # Weights for P6_0, P6_1 and P5_2 to P6_2\n        p6_w2 = self.p6_w2_relu(self.p6_w2)\n        weight = p6_w2 / 
(torch.sum(p6_w2, dim=0) + self.epsilon)\n        # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively\n        p6_out = self.conv6_down(\n            self.combine(weight[0] * p6_in + weight[1] * p6_up +\n                         weight[2] * self.p6_down_sample(p5_out)))\n\n        # Weights for P7_0 and P6_2 to P7_2\n        p7_w2 = self.p7_w2_relu(self.p7_w2)\n        weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)\n        # Connections for P7_0 and P6_2 to P7_2\n        p7_out = self.conv7_down(\n            self.combine(weight[0] * p7_in +\n                         weight[1] * self.p7_down_sample(p6_out)))\n        return p3_out, p4_out, p5_out, p6_out, p7_out\n\n\n@MODELS.register_module()\nclass BiFPN(BaseModule):\n    '''\n        num_stages: int, number of repeated BiFPN stages\n        in_channels: List[int], input dim for P3, P4, P5\n        out_channels: int, output dim for P3 - P7\n        start_level: int, index of input features in backbone\n        epsilon: float, small constant used in weighted feature fusion\n        apply_bn_for_resampling: bool, whether to use bn after resampling\n        conv_bn_act_pattern: bool, whether to use the conv-bn-act pattern\n        use_meswish: bool, whether to use MemoryEfficientSwish\n        norm_cfg: (:obj:`ConfigDict` or dict, optional): Config dict for\n            normalization layer.\n        init_cfg: MultiConfig, initialization config\n    '''\n\n    def __init__(self,\n                 num_stages: int,\n                 in_channels: List[int],\n                 out_channels: int,\n                 start_level: int = 0,\n                 epsilon: float = 1e-4,\n                 apply_bn_for_resampling: bool = True,\n                 conv_bn_act_pattern: bool = False,\n                 use_meswish: bool = True,\n                 norm_cfg: OptConfigType = dict(\n                     type='BN', momentum=1e-2, eps=1e-3),\n                 init_cfg: MultiConfig = None) -> None:\n\n        super().__init__(init_cfg=init_cfg)\n        self.start_level = start_level\n        self.bifpn = nn.Sequential(*[\n            BiFPNStage(\n                in_channels=in_channels,\n                out_channels=out_channels,\n                first_time=True if _ == 0 else False,\n                apply_bn_for_resampling=apply_bn_for_resampling,\n                conv_bn_act_pattern=conv_bn_act_pattern,\n                use_meswish=use_meswish,\n                norm_cfg=norm_cfg,\n                epsilon=epsilon) for _ in range(num_stages)\n        ])\n\n    def forward(self, x):\n        x = x[self.start_level:]\n        x = self.bifpn(x)\n\n        return x\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/coco_90class.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nfrom typing import List, Union\n\nfrom mmdet.datasets.base_det_dataset import BaseDetDataset\nfrom mmdet.registry import DATASETS\nfrom .api_wrappers import COCO\n\n\n@DATASETS.register_module()\nclass Coco90Dataset(BaseDetDataset):\n    \"\"\"Dataset for COCO.\"\"\"\n\n    METAINFO = {\n        'classes':\n        ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n         'truck', 'boat', 'traffic light', 'fire hydrant', None, 'stop sign',\n         'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n         'cow', 'elephant', 'bear', 'zebra', 'giraffe', None, 'backpack',\n         'umbrella', None, None, 'handbag', 'tie', 'suitcase', 'frisbee',\n         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n         'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n         'bottle', None, 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n         'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n         'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',\n         'bed', None, 'dining table', None, None, 'toilet', None, 'tv',\n         'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n         'oven', 'toaster', 'sink', 'refrigerator', None, 'book', 'clock',\n         'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'),\n        # palette is a list of color tuples, which is used for visualization.\n        'palette':\n        [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),\n         (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),\n         (100, 170, 30), None, (220, 220, 0), (175, 116, 175), (250, 0, 30),\n         (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),\n         (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),\n         (199, 100, 0), (72, 0, 118), None,\n         (255, 179, 240), (0, 125, 92), None, None, (209, 0, 151),\n         (188, 208, 182), (0, 220, 176), (255, 99, 164), (92, 0, 73),\n         (133, 129, 255), (78, 180, 255), (0, 228, 0), (174, 255, 243),\n         (45, 89, 255), (134, 134, 103), (145, 148, 174), (255, 208, 186),\n         (197, 226, 255), None, (171, 134, 1), (109, 63, 54), (207, 138, 255),\n         (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),\n         (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),\n         (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),\n         (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),\n         (119, 0, 170), None, (0, 182, 199), None, None, (0, 165, 120), None,\n         (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),\n         (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),\n         (65, 70, 15), (127, 167, 115), (59, 105, 106), None, (142, 108, 45),\n         (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),\n         (246, 0, 122), (191, 162, 208)]\n    }\n    COCOAPI = COCO\n    # ann_id is unique in coco dataset.\n    ANN_ID_UNIQUE = True\n\n    def load_data_list(self) -> List[dict]:\n        \"\"\"Load annotations from an annotation file named as ``self.ann_file``\n\n        Returns:\n            List[dict]: A list of annotation.\n        \"\"\"  # noqa: E501\n        with self.file_client.get_local_path(self.ann_file) as local_path:\n            self.coco = self.COCOAPI(local_path)\n        # The order of returned `cat_ids` 
will not\n        # change with the order of the `classes`\n        self.cat_ids = self.coco.get_cat_ids(\n            cat_names=self.metainfo['classes'])\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)\n\n        img_ids = self.coco.get_img_ids()\n        data_list = []\n        total_ann_ids = []\n        for img_id in img_ids:\n            raw_img_info = self.coco.load_imgs([img_id])[0]\n            raw_img_info['img_id'] = img_id\n\n            ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n            raw_ann_info = self.coco.load_anns(ann_ids)\n            total_ann_ids.extend(ann_ids)\n\n            parsed_data_info = self.parse_data_info({\n                'raw_ann_info':\n                raw_ann_info,\n                'raw_img_info':\n                raw_img_info\n            })\n            data_list.append(parsed_data_info)\n        if self.ANN_ID_UNIQUE:\n            assert len(set(total_ann_ids)) == len(\n                total_ann_ids\n            ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n        del self.coco\n\n        return data_list\n\n    def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:\n        \"\"\"Parse raw annotation to target format.\n\n        Args:\n            raw_data_info (dict): Raw data information load from ``ann_file``\n\n        Returns:\n            Union[dict, List[dict]]: Parsed annotation.\n        \"\"\"\n        img_info = raw_data_info['raw_img_info']\n        ann_info = raw_data_info['raw_ann_info']\n\n        data_info = {}\n\n        # TODO: need to change data_prefix['img'] to data_prefix['img_path']\n        img_path = osp.join(self.data_prefix['img'], img_info['file_name'])\n        if self.data_prefix.get('seg', None):\n            seg_map_path = osp.join(\n                self.data_prefix['seg'],\n                img_info['file_name'].rsplit('.', 1)[0] + self.seg_map_suffix)\n        else:\n            seg_map_path = None\n        data_info['img_path'] = img_path\n        data_info['img_id'] = img_info['img_id']\n        data_info['seg_map_path'] = seg_map_path\n        data_info['height'] = img_info['height']\n        data_info['width'] = img_info['width']\n\n        instances = []\n        for i, ann in enumerate(ann_info):\n            instance = {}\n\n            if ann.get('ignore', False):\n                continue\n            x1, y1, w, h = ann['bbox']\n            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n            if inter_w * inter_h == 0:\n                continue\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            if ann['category_id'] not in self.cat_ids:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n\n            if ann.get('iscrowd', False):\n                instance['ignore_flag'] = 1\n            else:\n                instance['ignore_flag'] = 0\n            instance['bbox'] = bbox\n            instance['bbox_label'] = self.cat2label[ann['category_id']]\n\n            if ann.get('segmentation', None):\n                instance['mask'] = ann['segmentation']\n\n            instances.append(instance)\n        data_info['instances'] = instances\n        return data_info\n\n    def filter_data(self) -> List[dict]:\n        \"\"\"Filter annotations according to filter_cfg.\n\n        Returns:\n            List[dict]: 
Filtered results.\n        \"\"\"\n        if self.test_mode:\n            return self.data_list\n\n        if self.filter_cfg is None:\n            return self.data_list\n\n        filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)\n        min_size = self.filter_cfg.get('min_size', 0)\n\n        # obtain images that contain annotation\n        ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)\n        # obtain images that contain annotations of the required categories\n        ids_in_cat = set()\n        for i, class_id in enumerate(self.cat_ids):\n            ids_in_cat |= set(self.cat_img_map[class_id])\n        # merge the image id sets of the two conditions and use the merged set\n        # to filter out images if self.filter_empty_gt=True\n        ids_in_cat &= ids_with_ann\n\n        valid_data_infos = []\n        for i, data_info in enumerate(self.data_list):\n            img_id = data_info['img_id']\n            width = data_info['width']\n            height = data_info['height']\n            if filter_empty_gt and img_id not in ids_in_cat:\n                continue\n            if min(width, height) >= min_size:\n                valid_data_infos.append(data_info)\n\n        return valid_data_infos\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/coco_90metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport datetime\nimport itertools\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\nfrom typing import Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.fileio import FileClient, dump, load\nfrom mmengine.logging import MMLogger\nfrom terminaltables import AsciiTable\n\nfrom mmdet.evaluation.functional import eval_recalls\nfrom mmdet.registry import METRICS\nfrom mmdet.structures.mask import encode_mask_results\nfrom .api_wrappers import COCO, COCOeval\n\n\n@METRICS.register_module()\nclass Coco90Metric(BaseMetric):\n    \"\"\"COCO evaluation metric.\n\n    Evaluate AR, AP, and mAP for detection tasks including proposal/box\n    detection and instance segmentation. Please refer to\n    https://cocodataset.org/#detection-eval for more details.\n\n    Args:\n        ann_file (str, optional): Path to the coco format annotation file.\n            If not specified, ground truth annotations from the dataset will\n            be converted to coco format. Defaults to None.\n        metric (str | List[str]): Metrics to be evaluated. Valid metrics\n            include 'bbox', 'segm', 'proposal', and 'proposal_fast'.\n            Defaults to 'bbox'.\n        classwise (bool): Whether to evaluate the metric class-wise.\n            Defaults to False.\n        proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.\n            Defaults to (100, 300, 1000).\n        iou_thrs (float | List[float], optional): IoU threshold to compute AP\n            and AR. If not specified, IoUs from 0.5 to 0.95 will be used.\n            Defaults to None.\n        metric_items (List[str], optional): Metric result names to be\n            recorded in the evaluation result. Defaults to None.\n        format_only (bool): Format the output results without perform\n            evaluation. It is useful when you want to format the result\n            to a specific format and submit it to the test server.\n            Defaults to False.\n        outfile_prefix (str, optional): The prefix of json files. It includes\n            the file path and the prefix of filename, e.g., \"a/b/prefix\".\n            If not specified, a temp file will be created. Defaults to None.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmengine.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n        collect_device (str): Device name used for collecting results from\n            different ranks during distributed training. Must be 'cpu' or\n            'gpu'. Defaults to 'cpu'.\n        prefix (str, optional): The prefix that will be added in the metric\n            names to disambiguate homonymous metrics of different evaluators.\n            If prefix is not provided in the argument, self.default_prefix\n            will be used instead. 
Defaults to None.\n    \"\"\"\n    default_prefix: Optional[str] = 'coco'\n\n    def __init__(self,\n                 ann_file: Optional[str] = None,\n                 metric: Union[str, List[str]] = 'bbox',\n                 classwise: bool = False,\n                 proposal_nums: Sequence[int] = (100, 300, 1000),\n                 iou_thrs: Optional[Union[float, Sequence[float]]] = None,\n                 metric_items: Optional[Sequence[str]] = None,\n                 format_only: bool = False,\n                 outfile_prefix: Optional[str] = None,\n                 file_client_args: dict = dict(backend='disk'),\n                 collect_device: str = 'cpu',\n                 prefix: Optional[str] = None) -> None:\n        super().__init__(collect_device=collect_device, prefix=prefix)\n        # coco evaluation metrics\n        self.metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n        for metric in self.metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(\n                    \"metric should be one of 'bbox', 'segm', 'proposal', \"\n                    f\"'proposal_fast', but got {metric}.\")\n\n        # do class wise evaluation, default False\n        self.classwise = classwise\n\n        # proposal_nums used to compute recall or precision.\n        self.proposal_nums = list(proposal_nums)\n\n        # iou_thrs used to compute recall or precision.\n        if iou_thrs is None:\n            iou_thrs = np.linspace(\n                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n        self.iou_thrs = iou_thrs\n        self.metric_items = metric_items\n        self.format_only = format_only\n        if self.format_only:\n            assert outfile_prefix is not None, (\n                'outfile_prefix must not be None when format_only is True, '\n                'otherwise the result files will be saved to a temp directory '\n                'which will be cleaned up at the end.')\n\n        self.outfile_prefix = outfile_prefix\n\n        self.file_client_args = file_client_args\n        self.file_client = FileClient(**file_client_args)\n\n        # if ann_file is not specified,\n        # initialize coco api with the converted dataset\n        if ann_file is not None:\n            with self.file_client.get_local_path(ann_file) as local_path:\n                self._coco_api = COCO(local_path)\n        else:\n            self._coco_api = None\n\n        # handle dataset lazy init\n        self.cat_ids = None\n        self.img_ids = None\n\n    def fast_eval_recall(self,\n                         results: List[dict],\n                         proposal_nums: Sequence[int],\n                         iou_thrs: Sequence[float],\n                         logger: Optional[MMLogger] = None) -> np.ndarray:\n        \"\"\"Evaluate proposal recall with COCO's fast_eval_recall.\n\n        Args:\n            results (List[dict]): Results of the dataset.\n            proposal_nums (Sequence[int]): Proposal numbers used for\n                evaluation.\n            iou_thrs (Sequence[float]): IoU thresholds used for evaluation.\n            logger (MMLogger, optional): Logger used for logging the recall\n                summary.\n        Returns:\n            np.ndarray: Averaged recall results.\n        \"\"\"\n        gt_bboxes = []\n        pred_bboxes = [result['bboxes'] for result in results]\n        for i in range(len(self.img_ids)):\n            ann_ids = 
self._coco_api.get_ann_ids(img_ids=self.img_ids[i])\n            ann_info = self._coco_api.load_anns(ann_ids)\n            if len(ann_info) == 0:\n                gt_bboxes.append(np.zeros((0, 4)))\n                continue\n            bboxes = []\n            for ann in ann_info:\n                if ann.get('ignore', False) or ann['iscrowd']:\n                    continue\n                x1, y1, w, h = ann['bbox']\n                bboxes.append([x1, y1, x1 + w, y1 + h])\n            bboxes = np.array(bboxes, dtype=np.float32)\n            if bboxes.shape[0] == 0:\n                bboxes = np.zeros((0, 4))\n            gt_bboxes.append(bboxes)\n\n        recalls = eval_recalls(\n            gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)\n        ar = recalls.mean(axis=1)\n        return ar\n\n    def xyxy2xywh(self, bbox: np.ndarray) -> list:\n        \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n        evaluation.\n\n        Args:\n            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n                ``xyxy`` order.\n\n        Returns:\n            list[float]: The converted bounding boxes, in ``xywh`` order.\n        \"\"\"\n\n        _bbox: List = bbox.tolist()\n        return [\n            _bbox[0],\n            _bbox[1],\n            _bbox[2] - _bbox[0],\n            _bbox[3] - _bbox[1],\n        ]\n\n    def results2json(self, results: Sequence[dict],\n                     outfile_prefix: str) -> dict:\n        \"\"\"Dump the detection results to a COCO style json file.\n\n        There are 3 types of results: proposals, bbox predictions, mask\n        predictions, and they have different data types. This method will\n        automatically recognize the type, and dump them to json files.\n\n        Args:\n            results (Sequence[dict]): Testing results of the\n                dataset.\n            outfile_prefix (str): The filename prefix of the json files. 
If the\n                prefix is \"somepath/xxx\", the json files will be named\n                \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n                \"somepath/xxx.proposal.json\".\n\n        Returns:\n            dict: Possible keys are \"bbox\", \"segm\", \"proposal\", and\n            values are corresponding filenames.\n        \"\"\"\n        bbox_json_results = []\n        segm_json_results = [] if 'masks' in results[0] else None\n        for idx, result in enumerate(results):\n            image_id = result.get('img_id', idx)\n            labels = result['labels']\n            bboxes = result['bboxes']\n            scores = result['scores']\n            # bbox results\n            for i, label in enumerate(labels):\n                data = dict()\n                data['image_id'] = image_id\n                data['bbox'] = self.xyxy2xywh(bboxes[i])\n                data['score'] = float(scores[i])\n                data['category_id'] = self.cat_ids[label]\n                bbox_json_results.append(data)\n\n            if segm_json_results is None:\n                continue\n\n            # segm results\n            masks = result['masks']\n            mask_scores = result.get('mask_scores', scores)\n            for i, label in enumerate(labels):\n                data = dict()\n                data['image_id'] = image_id\n                data['bbox'] = self.xyxy2xywh(bboxes[i])\n                data['score'] = float(mask_scores[i])\n                data['category_id'] = self.cat_ids[label]\n                if isinstance(masks[i]['counts'], bytes):\n                    masks[i]['counts'] = masks[i]['counts'].decode()\n                data['segmentation'] = masks[i]\n                segm_json_results.append(data)\n\n        result_files = dict()\n        result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n        result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n        dump(bbox_json_results, result_files['bbox'])\n\n        if segm_json_results is not None:\n            result_files['segm'] = f'{outfile_prefix}.segm.json'\n            dump(segm_json_results, result_files['segm'])\n\n        return result_files\n\n    def gt_to_coco_json(self, gt_dicts: Sequence[dict],\n                        outfile_prefix: str) -> str:\n        \"\"\"Convert ground truth to coco format json file.\n\n        Args:\n            gt_dicts (Sequence[dict]): Ground truth of the dataset.\n            outfile_prefix (str): The filename prefix of the json files. 
If the\n                prefix is \"somepath/xxx\", the json file will be named\n                \"somepath/xxx.gt.json\".\n        Returns:\n            str: The filename of the json file.\n        \"\"\"\n        categories = [\n            dict(id=id, name=name)\n            for id, name in enumerate(self.dataset_meta['classes'])\n        ]\n        image_infos = []\n        annotations = []\n\n        for idx, gt_dict in enumerate(gt_dicts):\n            img_id = gt_dict.get('img_id', idx)\n            image_info = dict(\n                id=img_id,\n                width=gt_dict['width'],\n                height=gt_dict['height'],\n                file_name='')\n            image_infos.append(image_info)\n            for ann in gt_dict['anns']:\n                label = ann['bbox_label']\n                bbox = ann['bbox']\n                coco_bbox = [\n                    bbox[0],\n                    bbox[1],\n                    bbox[2] - bbox[0],\n                    bbox[3] - bbox[1],\n                ]\n\n                annotation = dict(\n                    id=len(annotations) +\n                    1,  # coco api requires id starts with 1\n                    image_id=img_id,\n                    bbox=coco_bbox,\n                    iscrowd=ann.get('ignore_flag', 0),\n                    category_id=int(label),\n                    area=coco_bbox[2] * coco_bbox[3])\n                if ann.get('mask', None):\n                    mask = ann['mask']\n                    # area = mask_util.area(mask)\n                    if isinstance(mask, dict) and isinstance(\n                            mask['counts'], bytes):\n                        mask['counts'] = mask['counts'].decode()\n                    annotation['segmentation'] = mask\n                    # annotation['area'] = float(area)\n                annotations.append(annotation)\n\n        info = dict(\n            date_created=str(datetime.datetime.now()),\n            description='Coco json file converted by mmdet CocoMetric.')\n        coco_json = dict(\n            info=info,\n            images=image_infos,\n            categories=categories,\n            licenses=None,\n        )\n        if len(annotations) > 0:\n            coco_json['annotations'] = annotations\n        converted_json_path = f'{outfile_prefix}.gt.json'\n        dump(coco_json, converted_json_path)\n        return converted_json_path\n\n    # TODO: data_batch is no longer needed, consider adjusting the\n    #  parameter position\n    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n        \"\"\"Process one batch of data samples and predictions. 
The processed\n        results should be stored in ``self.results``, which will be used to\n        compute the metrics when all batches have been processed.\n\n        Args:\n            data_batch (dict): A batch of data from the dataloader.\n            data_samples (Sequence[dict]): A batch of data samples that\n                contain annotations and predictions.\n        \"\"\"\n        for data_sample in data_samples:\n            result = dict()\n            pred = data_sample['pred_instances']\n            result['img_id'] = data_sample['img_id']\n            result['bboxes'] = pred['bboxes'].cpu().numpy()\n            result['scores'] = pred['scores'].cpu().numpy()\n            result['labels'] = pred['labels'].cpu().numpy()\n            # encode mask to RLE\n            if 'masks' in pred:\n                result['masks'] = encode_mask_results(\n                    pred['masks'].detach().cpu().numpy())\n            # some detectors use different scores for bbox and mask\n            if 'mask_scores' in pred:\n                result['mask_scores'] = pred['mask_scores'].cpu().numpy()\n\n            # parse gt\n            gt = dict()\n            gt['width'] = data_sample['ori_shape'][1]\n            gt['height'] = data_sample['ori_shape'][0]\n            gt['img_id'] = data_sample['img_id']\n            if self._coco_api is None:\n                # TODO: Need to refactor to support LoadAnnotations\n                assert 'instances' in data_sample, \\\n                    'ground truth is required for evaluation when ' \\\n                    '`ann_file` is not provided'\n                gt['anns'] = data_sample['instances']\n            # add converted result to the results list\n            self.results.append((gt, result))\n\n    def compute_metrics(self, results: list) -> Dict[str, float]:\n        \"\"\"Compute the metrics from processed results.\n\n        Args:\n            results (list): The processed results of each batch.\n\n        Returns:\n            Dict[str, float]: The computed metrics. 
The keys are the names of\n            the metrics, and the values are corresponding results.\n        \"\"\"\n        logger: MMLogger = MMLogger.get_current_instance()\n\n        # split gt and prediction list\n        gts, preds = zip(*results)\n\n        tmp_dir = None\n        if self.outfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            outfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            outfile_prefix = self.outfile_prefix\n\n        if self._coco_api is None:\n            # use converted gt json file to initialize coco api\n            logger.info('Converting ground truth to coco format...')\n            coco_json_path = self.gt_to_coco_json(\n                gt_dicts=gts, outfile_prefix=outfile_prefix)\n            self._coco_api = COCO(coco_json_path)\n\n            # handle lazy init\n        if self.cat_ids is None:\n            self.cat_ids = self._coco_api.get_cat_ids(\n                cat_names=self.dataset_meta['classes'])\n        if self.img_ids is None:\n            self.img_ids = self._coco_api.get_img_ids()\n\n        # convert predictions to coco format and dump to json file\n        result_files = self.results2json(preds, outfile_prefix)\n\n        eval_results = OrderedDict()\n        if self.format_only:\n            logger.info('results are saved in '\n                        f'{osp.dirname(outfile_prefix)}')\n            return eval_results\n\n        for metric in self.metrics:\n            logger.info(f'Evaluating {metric}...')\n\n            # TODO: May refactor fast_eval_recall to an independent metric?\n            # fast eval recall\n            if metric == 'proposal_fast':\n                ar = self.fast_eval_recall(\n                    preds, self.proposal_nums, self.iou_thrs, logger=logger)\n                log_msg = []\n                for i, num in enumerate(self.proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n                    log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n                log_msg = ''.join(log_msg)\n                logger.info(log_msg)\n                continue\n\n            # evaluate proposal, bbox and segm\n            iou_type = 'bbox' if metric == 'proposal' else metric\n            if metric not in result_files:\n                raise KeyError(f'{metric} is not in results')\n            try:\n                predictions = load(result_files[metric])\n                if iou_type == 'segm':\n                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331  # noqa\n                    # When evaluating mask AP, if the results contain bbox,\n                    # cocoapi will use the box area instead of the mask area\n                    # for calculating the instance area. 
Though the overall AP\n                    # is not affected, this leads to different\n                    # small/medium/large mask AP results.\n                    for x in predictions:\n                        x.pop('bbox')\n                coco_dt = self._coco_api.loadRes(predictions)\n\n            except IndexError:\n                logger.error(\n                    'The testing results of the whole dataset are empty.')\n                break\n\n            coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)\n\n            coco_eval.params.catIds = self.cat_ids\n            coco_eval.params.imgIds = self.img_ids\n            coco_eval.params.maxDets = list(self.proposal_nums)\n            coco_eval.params.iouThrs = self.iou_thrs\n\n            # mapping of cocoEval.stats\n            coco_metric_names = {\n                'mAP': 0,\n                'mAP_50': 1,\n                'mAP_75': 2,\n                'mAP_s': 3,\n                'mAP_m': 4,\n                'mAP_l': 5,\n                'AR@100': 6,\n                'AR@300': 7,\n                'AR@1000': 8,\n                'AR_s@1000': 9,\n                'AR_m@1000': 10,\n                'AR_l@1000': 11\n            }\n            metric_items = self.metric_items\n            if metric_items is not None:\n                for metric_item in metric_items:\n                    if metric_item not in coco_metric_names:\n                        raise KeyError(\n                            f'metric item \"{metric_item}\" is not supported')\n\n            if metric == 'proposal':\n                coco_eval.params.useCats = 0\n                coco_eval.evaluate()\n                coco_eval.accumulate()\n                coco_eval.summarize()\n                if metric_items is None:\n                    metric_items = [\n                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n                        'AR_m@1000', 'AR_l@1000'\n                    ]\n\n                for item in metric_items:\n                    val = float(\n                        f'{coco_eval.stats[coco_metric_names[item]]:.3f}')\n                    eval_results[item] = val\n            else:\n                coco_eval.evaluate()\n                coco_eval.accumulate()\n                coco_eval.summarize()\n                if self.classwise:\n                    # Compute per-category AP\n                    # from https://github.com/facebookresearch/detectron2/\n                    precisions = coco_eval.eval['precision']\n                    # precision: (iou, recall, cls, area range, max dets)\n                    assert len(self.cat_ids) == precisions.shape[2]\n\n                    results_per_category = []\n                    for idx, cat_id in enumerate(self.cat_ids):\n                        # area range index 0: all area ranges\n                        # max dets index -1: typically 100 per image\n                        nm = self._coco_api.loadCats(cat_id)[0]\n                        precision = precisions[:, :, idx, 0, -1]\n                        precision = precision[precision > -1]\n                        if precision.size:\n                            ap = np.mean(precision)\n                        else:\n                            ap = float('nan')\n                        results_per_category.append(\n                            (f'{nm[\"name\"]}', f'{round(ap, 3)}'))\n                        eval_results[f'{nm[\"name\"]}_precision'] = round(ap, 3)\n\n                    num_columns = min(6, 
len(results_per_category) * 2)\n                    results_flatten = list(\n                        itertools.chain(*results_per_category))\n                    headers = ['category', 'AP'] * (num_columns // 2)\n                    results_2d = itertools.zip_longest(*[\n                        results_flatten[i::num_columns]\n                        for i in range(num_columns)\n                    ])\n                    table_data = [headers]\n                    table_data += [result for result in results_2d]\n                    table = AsciiTable(table_data)\n                    logger.info('\\n' + table.table)\n\n                if metric_items is None:\n                    metric_items = [\n                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n                    ]\n\n                for metric_item in metric_items:\n                    key = f'{metric}_{metric_item}'\n                    val = coco_eval.stats[coco_metric_names[metric_item]]\n                    eval_results[key] = float(f'{round(val, 3)}')\n\n                ap = coco_eval.stats[:6]\n                logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '\n                            f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '\n                            f'{ap[4]:.3f} {ap[5]:.3f}')\n\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/efficientdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.detectors.single_stage import SingleStageDetector\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import ConfigType, OptConfigType, OptMultiConfig\n\n\n@MODELS.register_module()\nclass EfficientDet(SingleStageDetector):\n\n    def __init__(self,\n                 backbone: ConfigType,\n                 neck: ConfigType,\n                 bbox_head: ConfigType,\n                 train_cfg: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 data_preprocessor: OptConfigType = None,\n                 init_cfg: OptMultiConfig = None) -> None:\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            data_preprocessor=data_preprocessor,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/efficientdet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom mmcv.cnn.bricks import build_norm_layer\nfrom mmengine.model import bias_init_with_prob\nfrom torch import Tensor\n\nfrom mmdet.models.dense_heads.anchor_head import AnchorHead\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import OptConfigType, OptMultiConfig\nfrom .utils import DepthWiseConvBlock, MemoryEfficientSwish\n\n\n@MODELS.register_module()\nclass EfficientDetSepBNHead(AnchorHead):\n    \"\"\"EfficientDetHead with separate BN.\n\n    num_classes (int): Number of categories excluding the background\n    category. in_channels (int): Number of channels in the input feature map.\n    feat_channels (int): Number of hidden channels. stacked_convs (int): Number\n    of repetitions of conv norm_cfg (dict): Config dict for normalization\n    layer. anchor_generator (dict): Config dict for anchor generator bbox_coder\n    (dict): Config of bounding box coder. loss_cls (dict): Config of\n    classification loss. loss_bbox (dict): Config of localization loss.\n    train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing\n    config of anchor head. init_cfg (dict or list[dict], optional):\n    Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes: int,\n                 num_ins: int,\n                 in_channels: int,\n                 feat_channels: int,\n                 stacked_convs: int = 3,\n                 norm_cfg: OptConfigType = dict(\n                     type='BN', momentum=1e-2, eps=1e-3),\n                 init_cfg: OptMultiConfig = None,\n                 **kwargs) -> None:\n        self.num_ins = num_ins\n        self.stacked_convs = stacked_convs\n        self.norm_cfg = norm_cfg\n        super().__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self) -> None:\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.reg_conv_list = nn.ModuleList()\n        self.cls_conv_list = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            channels = self.in_channels if i == 0 else self.feat_channels\n            self.reg_conv_list.append(\n                DepthWiseConvBlock(\n                    channels, self.feat_channels, apply_norm=False))\n            self.cls_conv_list.append(\n                DepthWiseConvBlock(\n                    channels, self.feat_channels, apply_norm=False))\n\n        self.reg_bn_list = nn.ModuleList([\n            nn.ModuleList([\n                build_norm_layer(\n                    self.norm_cfg, num_features=self.feat_channels)[1]\n                for j in range(self.num_ins)\n            ]) for i in range(self.stacked_convs)\n        ])\n\n        self.cls_bn_list = nn.ModuleList([\n            nn.ModuleList([\n                build_norm_layer(\n                    self.norm_cfg, num_features=self.feat_channels)[1]\n                for j in range(self.num_ins)\n            ]) for i in range(self.stacked_convs)\n        ])\n\n        self.cls_header = DepthWiseConvBlock(\n            self.in_channels,\n            self.num_base_priors * self.cls_out_channels,\n            apply_norm=False)\n        self.reg_header = DepthWiseConvBlock(\n            self.in_channels, self.num_base_priors * 4, apply_norm=False)\n        self.swish = MemoryEfficientSwish()\n\n    def 
init_weights(self) -> None:\n        \"\"\"Initialize weights of the head.\"\"\"\n        for m in self.reg_conv_list:\n            nn.init.constant_(m.pointwise_conv.conv.bias, 0.0)\n        for m in self.cls_conv_list:\n            nn.init.constant_(m.pointwise_conv.conv.bias, 0.0)\n        bias_cls = bias_init_with_prob(0.01)\n        nn.init.constant_(self.cls_header.pointwise_conv.conv.bias, bias_cls)\n        nn.init.constant_(self.reg_header.pointwise_conv.conv.bias, 0.0)\n\n    def forward_single_bbox(self, feat: Tensor, level_id: int,\n                            i: int) -> Tensor:\n        conv_op = self.reg_conv_list[i]\n        bn = self.reg_bn_list[i][level_id]\n\n        feat = conv_op(feat)\n        feat = bn(feat)\n        feat = self.swish(feat)\n\n        return feat\n\n    def forward_single_cls(self, feat: Tensor, level_id: int,\n                           i: int) -> Tensor:\n        conv_op = self.cls_conv_list[i]\n        bn = self.cls_bn_list[i][level_id]\n\n        feat = conv_op(feat)\n        feat = bn(feat)\n        feat = self.swish(feat)\n\n        return feat\n\n    def forward(self, feats: Tuple[Tensor]) -> tuple:\n        cls_scores = []\n        bbox_preds = []\n        for level_id in range(self.num_ins):\n            feat = feats[level_id]\n            for i in range(self.stacked_convs):\n                feat = self.forward_single_bbox(feat, level_id, i)\n            bbox_pred = self.reg_header(feat)\n            bbox_preds.append(bbox_pred)\n        for level_id in range(self.num_ins):\n            feat = feats[level_id]\n            for i in range(self.stacked_convs):\n                feat = self.forward_single_cls(feat, level_id, i)\n            cls_score = self.cls_header(feat)\n            cls_scores.append(cls_score)\n\n        return cls_scores, bbox_preds\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/trans_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Optional\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners.assign_result import AssignResult\nfrom mmdet.models.task_modules.assigners.max_iou_assigner import MaxIoUAssigner\nfrom mmdet.registry import TASK_UTILS\n\n\n@TASK_UTILS.register_module()\nclass TransMaxIoUAssigner(MaxIoUAssigner):\n\n    def assign(self,\n               pred_instances: InstanceData,\n               gt_instances: InstanceData,\n               gt_instances_ignore: Optional[InstanceData] = None,\n               **kwargs) -> AssignResult:\n        \"\"\"Assign gt to bboxes.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, or a semi-positive number. -1 means negative\n        sample, semi-positive number is the index (0-based) of assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to the background\n        2. assign proposals whose iou with all gts < neg_iou_thr to 0\n        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,\n           assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals (may be more than\n           one) to itself\n\n        Args:\n            pred_instances (:obj:`InstanceData`): Instances of model\n                predictions. It includes ``priors``, and the priors can\n                be anchors or points, or the bboxes predicted by the\n                previous stage, has shape (n, 4). The bboxes predicted by\n                the current model or stage will be named ``bboxes``,\n                ``labels``, and ``scores``, the same as the ``InstanceData``\n                in other places.\n            gt_instances (:obj:`InstanceData`): Ground truth of instance\n                annotations. It usually includes ``bboxes``, with shape (k, 4),\n                and ``labels``, with shape (k, ).\n            gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n                to be ignored during training. It includes ``bboxes``\n                attribute data that is ignored during training and testing.\n                Defaults to None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n\n        Example:\n            >>> from mmengine.structures import InstanceData\n            >>> self = MaxIoUAssigner(0.5, 0.5)\n            >>> pred_instances = InstanceData()\n            >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],\n            ...                                      
[10, 10, 20, 20]])\n            >>> gt_instances = InstanceData()\n            >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])\n            >>> gt_instances.labels = torch.Tensor([0])\n            >>> assign_result = self.assign(pred_instances, gt_instances)\n            >>> expected_gt_inds = torch.LongTensor([1, 0])\n            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)\n        \"\"\"\n        gt_bboxes = gt_instances.bboxes\n        priors = pred_instances.priors\n        gt_labels = gt_instances.labels\n        if gt_instances_ignore is not None:\n            gt_bboxes_ignore = gt_instances_ignore.bboxes\n        else:\n            gt_bboxes_ignore = None\n\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            gt_bboxes.shape[0] > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = priors.device\n            priors = priors.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            gt_labels = gt_labels.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n\n        trans_priors = torch.cat([\n            priors[..., 1].view(-1, 1), priors[..., 0].view(-1, 1),\n            priors[..., 3].view(-1, 1), priors[..., 2].view(-1, 1)\n        ],\n                                 dim=-1)\n        overlaps = self.iou_calculator(gt_bboxes, trans_priors)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and trans_priors.numel() > 0):\n            if self.ignore_wrt_candidates:\n                ignore_overlaps = self.iou_calculator(\n                    trans_priors, gt_bboxes_ignore, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            else:\n                ignore_overlaps = self.iou_calculator(\n                    gt_bboxes_ignore, trans_priors, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nfrom typing import Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn.bricks import Swish, build_norm_layer\nfrom torch.nn import functional as F\n\nfrom mmdet.utils import OptConfigType\n\n\nclass SwishImplementation(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, i):\n        result = i * torch.sigmoid(i)\n        ctx.save_for_backward(i)\n        return result\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        i = ctx.saved_variables[0]\n        sigmoid_i = torch.sigmoid(i)\n        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\n\n\nclass MemoryEfficientSwish(nn.Module):\n\n    def forward(self, x):\n        return SwishImplementation.apply(x)\n\n\nclass Conv2dSamePadding(nn.Module):\n\n    def __init__(self,\n                 in_channels: int,\n                 out_channels: int,\n                 kernel_size: Union[int, Tuple[int, int]],\n                 stride: Union[int, Tuple[int, int]] = 1,\n                 groups: int = 1,\n                 bias: bool = True):\n        super().__init__()\n        self.conv = nn.Conv2d(\n            in_channels,\n            out_channels,\n            kernel_size,\n            stride=stride,\n            bias=bias,\n            groups=groups)\n        self.stride = self.conv.stride\n        self.kernel_size = self.conv.kernel_size\n\n    def forward(self, x):\n        h, w = x.shape[-2:]\n        extra_h = (math.ceil(w / self.stride[1]) -\n                   1) * self.stride[1] - w + self.kernel_size[1]\n        extra_v = (math.ceil(h / self.stride[0]) -\n                   1) * self.stride[0] - h + self.kernel_size[0]\n\n        left = extra_h // 2\n        right = extra_h - left\n        top = extra_v // 2\n        bottom = extra_v - top\n\n        x = F.pad(x, [left, right, top, bottom])\n        x = self.conv(x)\n\n        return x\n\n\nclass MaxPool2dSamePadding(nn.Module):\n\n    def __init__(self,\n                 kernel_size: Union[int, Tuple[int, int]] = 3,\n                 stride: Union[int, Tuple[int, int]] = 2,\n                 **kwargs):\n        super().__init__()\n        self.pool = nn.MaxPool2d(kernel_size, stride, **kwargs)\n        self.stride = self.pool.stride\n        self.kernel_size = self.pool.kernel_size\n\n        if isinstance(self.stride, int):\n            self.stride = [self.stride] * 2\n        if isinstance(self.kernel_size, int):\n            self.kernel_size = [self.kernel_size] * 2\n\n    def forward(self, x):\n        h, w = x.shape[-2:]\n\n        extra_h = (math.ceil(w / self.stride[1]) -\n                   1) * self.stride[1] - w + self.kernel_size[1]\n        extra_v = (math.ceil(h / self.stride[0]) -\n                   1) * self.stride[0] - h + self.kernel_size[0]\n\n        left = extra_h // 2\n        right = extra_h - left\n        top = extra_v // 2\n        bottom = extra_v - top\n\n        x = F.pad(x, [left, right, top, bottom])\n        x = self.pool(x)\n\n        return x\n\n\nclass DepthWiseConvBlock(nn.Module):\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        apply_norm: bool = True,\n        conv_bn_act_pattern: bool = False,\n        use_meswish: bool = True,\n        norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)\n    ) -> None:\n        super(DepthWiseConvBlock, self).__init__()\n        self.depthwise_conv = Conv2dSamePadding(\n            in_channels,\n            
in_channels,\n            kernel_size=3,\n            stride=1,\n            groups=in_channels,\n            bias=False)\n        self.pointwise_conv = Conv2dSamePadding(\n            in_channels, out_channels, kernel_size=1, stride=1)\n\n        self.apply_norm = apply_norm\n        if self.apply_norm:\n            self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]\n\n        self.apply_activation = conv_bn_act_pattern\n        if self.apply_activation:\n            self.swish = MemoryEfficientSwish() if use_meswish else Swish()\n\n    def forward(self, x):\n        x = self.depthwise_conv(x)\n        x = self.pointwise_conv(x)\n        if self.apply_norm:\n            x = self.bn(x)\n        if self.apply_activation:\n            x = self.swish(x)\n\n        return x\n\n\nclass DownChannelBlock(nn.Module):\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        apply_norm: bool = True,\n        conv_bn_act_pattern: bool = False,\n        use_meswish: bool = True,\n        norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)\n    ) -> None:\n        super(DownChannelBlock, self).__init__()\n        self.down_conv = Conv2dSamePadding(in_channels, out_channels, 1)\n        self.apply_norm = apply_norm\n        if self.apply_norm:\n            self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]\n        self.apply_activation = conv_bn_act_pattern\n        if self.apply_activation:\n            self.swish = MemoryEfficientSwish() if use_meswish else Swish()\n\n    def forward(self, x):\n        x = self.down_conv(x)\n        if self.apply_norm:\n            x = self.bn(x)\n        if self.apply_activation:\n            x = self.swish(x)\n\n        return x\n"
  },
  {
    "path": "projects/EfficientDet/efficientdet/yxyx_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\n\nfrom mmdet.models.task_modules.coders.delta_xywh_bbox_coder import \\\n    DeltaXYWHBBoxCoder\nfrom mmdet.registry import TASK_UTILS\nfrom mmdet.structures.bbox import HorizontalBoxes, get_box_tensor\n\n\n@TASK_UTILS.register_module()\nclass YXYXDeltaXYWHBBoxCoder(DeltaXYWHBBoxCoder):\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,\n                e.g., object proposals.\n            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the\n                transformation, e.g., ground-truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        gt_bboxes = get_box_tensor(gt_bboxes)\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = YXbbox2delta(bboxes, gt_bboxes, self.means, self.stds)\n        return encoded_bboxes\n\n    def decode(self,\n               bboxes,\n               pred_bboxes,\n               max_shape=None,\n               wh_ratio_clip=16 / 1000):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape\n                (B, N, 4) or (N, 4)\n            pred_bboxes (Tensor): Encoded offsets with respect to each roi.\n               Has shape (B, N, num_classes * 4) or (B, N, 4) or\n               (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n               when rois is a grid of anchors.Offset encoding follows [1]_.\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n               Sequence[int]],optional): Maximum bounds for boxes, specifies\n               (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then\n               the max_shape should be a Sequence[Sequence[int]]\n               and the length of max_shape should also be B.\n            wh_ratio_clip (float, optional): The allowed ratio between\n                width and height.\n\n        Returns:\n            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.\n        \"\"\"\n        bboxes = get_box_tensor(bboxes)\n        assert pred_bboxes.size(0) == bboxes.size(0)\n        if pred_bboxes.ndim == 3:\n            assert pred_bboxes.size(1) == bboxes.size(1)\n\n        if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():\n            # single image decode\n            decoded_bboxes = YXdelta2bbox(bboxes, pred_bboxes, self.means,\n                                          self.stds, max_shape, wh_ratio_clip,\n                                          self.clip_border, self.add_ctr_clamp,\n                                          self.ctr_clamp)\n        else:\n            if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():\n                warnings.warn(\n                    'DeprecationWarning: onnx_delta2bbox is deprecated '\n                    'in the case of batch decoding and non-ONNX, '\n                    'please use “delta2bbox” instead. In order to improve '\n                    'the decoding speed, the batch function will no '\n                    'longer be supported. 
')\n            decoded_bboxes = YXonnx_delta2bbox(bboxes, pred_bboxes, self.means,\n                                               self.stds, max_shape,\n                                               wh_ratio_clip, self.clip_border,\n                                               self.add_ctr_clamp,\n                                               self.ctr_clamp)\n\n        if self.use_box_type:\n            assert decoded_bboxes.size(-1) == 4, \\\n                ('Cannot warp decoded boxes with box type when decoded boxes'\n                 'have shape of (N, num_classes * 4)')\n            decoded_bboxes = HorizontalBoxes(decoded_bboxes)\n        return decoded_bboxes\n\n\ndef YXdelta2bbox(rois,\n                 deltas,\n                 means=(0., 0., 0., 0.),\n                 stds=(1., 1., 1., 1.),\n                 max_shape=None,\n                 hw_ratio_clip=1000 / 16,\n                 clip_border=True,\n                 add_ctr_clamp=False,\n                 ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4).\n        deltas (Tensor): Encoded offsets relative to each roi.\n            Has shape (N, num_classes * 4) or (N, 4). Note\n            N = num_base_anchors * W * H, when rois is a grid of\n            anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies\n           (H, W). Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default\n            16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp. When set to True,\n            the center of the prediction bounding box will be clamped to\n            avoid being too far away from the center of the anchor.\n            Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4\n           represent tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. 
[1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4\n    if num_bboxes == 0:\n        return deltas\n\n    deltas = deltas.reshape(-1, 4)\n\n    means = deltas.new_tensor(means).view(1, -1)\n    stds = deltas.new_tensor(stds).view(1, -1)\n    denorm_deltas = deltas * stds + means\n\n    dyx = denorm_deltas[:, :2]\n    dhw = denorm_deltas[:, 2:]\n\n    # Compute width/height of each roi\n    rois_ = rois.repeat(1, num_classes).reshape(-1, 4)\n    pyx = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)\n    phw = (rois_[:, 2:] - rois_[:, :2])\n\n    dyx_hw = phw * dyx\n\n    max_ratio = np.abs(np.log(hw_ratio_clip))\n    if add_ctr_clamp:\n        dyx_hw = torch.clamp(dyx_hw, max=ctr_clamp, min=-ctr_clamp)\n        dhw = torch.clamp(dhw, max=max_ratio)\n    else:\n        dhw = dhw.clamp(min=-max_ratio, max=max_ratio)\n\n    gyx = pyx + dyx_hw\n    ghw = phw * dhw.exp()\n    y1x1 = gyx - (ghw * 0.5)\n    y2x2 = gyx + (ghw * 0.5)\n    ymin, xmin = y1x1[:, 0].reshape(-1, 1), y1x1[:, 1].reshape(-1, 1)\n    ymax, xmax = y2x2[:, 0].reshape(-1, 1), y2x2[:, 1].reshape(-1, 1)\n    bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)\n    if clip_border and max_shape is not None:\n        bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])\n        bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])\n    bboxes = bboxes.reshape(num_bboxes, -1)\n    return bboxes\n\n\ndef YXbbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):\n    \"\"\"Compute deltas of proposals w.r.t. 
gt.\n\n    We usually compute the deltas of x, y, w, h of proposals w.r.t ground\n    truth bboxes to get regression target.\n    This is the inverse function of :func:`delta2bbox`.\n\n    Args:\n        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n        gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n\n    Returns:\n        Tensor: deltas with shape (N, 4), where columns represent dy, dx,\n            dh, dw.\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    proposals = proposals.float()\n    gt = gt.float()\n    py = (proposals[..., 0] + proposals[..., 2]) * 0.5\n    px = (proposals[..., 1] + proposals[..., 3]) * 0.5\n    ph = proposals[..., 2] - proposals[..., 0]\n    pw = proposals[..., 3] - proposals[..., 1]\n\n    gx = (gt[..., 0] + gt[..., 2]) * 0.5\n    gy = (gt[..., 1] + gt[..., 3]) * 0.5\n    gw = gt[..., 2] - gt[..., 0]\n    gh = gt[..., 3] - gt[..., 1]\n\n    dx = (gx - px) / pw\n    dy = (gy - py) / ph\n    dw = torch.log(gw / pw)\n    dh = torch.log(gh / ph)\n    deltas = torch.stack([dy, dx, dh, dw], dim=-1)\n\n    means = deltas.new_tensor(means).unsqueeze(0)\n    stds = deltas.new_tensor(stds).unsqueeze(0)\n    deltas = deltas.sub_(means).div_(stds)\n\n    return deltas\n\n\ndef YXonnx_delta2bbox(rois,\n                      deltas,\n                      means=(0., 0., 0., 0.),\n                      stds=(1., 1., 1., 1.),\n                      max_shape=None,\n                      wh_ratio_clip=16 / 1000,\n                      clip_border=True,\n                      add_ctr_clamp=False,\n                      ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)\n        deltas (Tensor): Encoded offsets with respect to each roi.\n            Has shape (B, N, num_classes * 4) or (B, N, 4) or\n            (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n            when rois is a grid of anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]],optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). If rois shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B. Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes.\n            Default 16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp, when added, the\n            predicted box is clamped if its center is too far away from\n            the original anchor's center. Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. 
Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or\n           (N, num_classes * 4) or (N, 4), where 4 represent\n           tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. [1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    means = deltas.new_tensor(means).view(1,\n                                          -1).repeat(1,\n                                                     deltas.size(-1) // 4)\n    stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)\n    denorm_deltas = deltas * stds + means\n    dy = denorm_deltas[..., 0::4]\n    dx = denorm_deltas[..., 1::4]\n    dh = denorm_deltas[..., 2::4]\n    dw = denorm_deltas[..., 3::4]\n\n    y1, x1 = rois[..., 0], rois[..., 1]\n    y2, x2 = rois[..., 2], rois[..., 3]\n    # Compute center of each roi\n    px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)\n    py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)\n    # Compute width/height of each roi\n    pw = (x2 - x1).unsqueeze(-1).expand_as(dw)\n    ph = (y2 - y1).unsqueeze(-1).expand_as(dh)\n\n    dx_width = pw * dx\n    dy_height = ph * dy\n\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    if add_ctr_clamp:\n        dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)\n        dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)\n        dw = torch.clamp(dw, max=max_ratio)\n        dh = torch.clamp(dh, max=max_ratio)\n    else:\n        dw = dw.clamp(min=-max_ratio, max=max_ratio)\n        dh = dh.clamp(min=-max_ratio, max=max_ratio)\n    # Use exp(network energy) to enlarge/shrink each roi\n    gw = pw * dw.exp()\n    gh = ph * dh.exp()\n    # Use network energy to shift the center of each roi\n    gx = px + dx_width\n    gy = py + dy_height\n    # Convert center-xy/width/height to top-left, bottom-right\n    x1 = gx - gw * 0.5\n    y1 = gy - gh * 0.5\n    x2 = gx + gw * 0.5\n    y2 = gy + gh * 0.5\n\n    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n\n    if clip_border and max_shape is not None:\n        # clip bboxes with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n            bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = x1.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(x1)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = x1.new_tensor(0)\n        max_xy = 
torch.cat(\n            [max_shape] * (deltas.size(-1) // 2),\n            dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n"
  },
  {
    "path": "projects/SparseInst/README.md",
    "content": "<div align=\"center\">\n<img src=\"https://github.com/hustvl/SparseInst/raw/main/assets/banner.gif\">\n<br>\n<br>\nTianheng Cheng, <a href=\"https://xinggangw.info/\">Xinggang Wang</a><sup><span>&#8224;</span></sup>, Shaoyu Chen, Wenqiang Zhang, <a href=\"https://scholar.google.com/citations?user=pCY-bikAAAAJ&hl=zh-CN\">Qian Zhang</a>, <a href=\"https://scholar.google.com/citations?user=IyyEKyIAAAAJ&hl=zh-CN\">Chang Huang</a>, <a href=\"https://zhaoxiangzhang.net/\">Zhaoxiang Zhang</a>, <a href=\"http://eic.hust.edu.cn/professor/liuwenyu/\"> Wenyu Liu</a>\n</br>\n(<span>&#8224;</span>: corresponding author)\n<div>\n<a href=\"https://arxiv.org/abs/2203.12827\">[arXiv paper]</a>\n<a href=\"https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Sparse_Instance_Activation_for_Real-Time_Instance_Segmentation_CVPR_2022_paper.pdf\">[CVPR paper]</a>\n<a href=\"https://drive.google.com/file/d/1xhqQvQ0YVCHd8XQxnCVqef75Hey7kI-d/view?usp=sharing\">[slides]</a>\n</div>\n</div>\n\n## Description\n\nThis is an implementation of [SparseInst](https://github.com/hustvl/SparseInst) based on [MMDetection](https://github.com/open-mmlab/mmdetection/tree/3.x), [MMCV](https://github.com/open-mmlab/mmcv), and [MMEngine](https://github.com/open-mmlab/mmengine).\n\n**SparseInst** is a conceptually novel, efficient, and fully convolutional framework for real-time instance segmentation.\nIn contrast to region boxes or anchors (centers), SparseInst adopts a sparse set of **instance activation maps** as object representation, to highlight informative regions for each foreground objects.\nThen it obtains the instance-level features by aggregating features according to the highlighted regions for recognition and segmentation.\nThe bipartite matching compels the instance activation maps to predict objects in a one-to-one style, thus avoiding non-maximum suppression (NMS) in post-processing. Owing to the simple yet effective designs with instance activation maps, SparseInst has extremely fast inference speed and achieves **40 FPS** and **37.9 AP** on COCO (NVIDIA 2080Ti), significantly outperforms the counter parts in terms of speed and accuracy.\n\n<center>\n<img src=\"https://github.com/hustvl/SparseInst/raw/main/assets/sparseinst.png\">\n</center>\n\n## Usage\n\n<!-- For a typical model, this section should contain the commands for training and testing. You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. 
-->\n\n### Training commands\n\nIn MMDetection's root directory, run the following command to train the model:\n\n```bash\npython tools/train.py projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py\n```\n\nFor multi-gpu training, run:\n\n```bash\npython -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=${NUM_GPUS} --master_port=29506 --master_addr=\"127.0.0.1\" tools/train.py projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py\n```\n\n### Testing commands\n\nIn MMDetection's root directory, run the following command to test the model:\n\n```bash\npython tools/test.py projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py ${CHECKPOINT_PATH}\n```\n\n## Results\n\nHere we provide the baseline version of SparseInst with ResNet50 backbone.\n\nTo find more variants, please visit the [official model zoo](https://github.com/hustvl/SparseInst#models).\n\n| Backbone |  Style  | Lr schd | Mem (GB) | FPS  | mask AP val2017 |                           Config                            |                                                                                                                                                                    Download                                                                                                                                                                    |\n| :------: | :-----: | :-----: | :------: | :--: | :-------------: | :---------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |\n|   R-50   | PyTorch |  270k   |   8.7    | 44.3 |      32.9       | [config](./configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v3.0/sparseinst/sparseinst_r50_iam_8xb8-ms-270k_coco/sparseinst_r50_iam_8xb8-ms-270k_coco_20221111_181051-72c711cd.pth) \\| [log](https://download.openmmlab.com/mmdetection/v3.0/sparseinst/sparseinst_r50_iam_8xb8-ms-270k_coco/sparseinst_r50_iam_8xb8-ms-270k_coco_20221111_181051.json) |\n\n## Citation\n\nIf you find SparseInst is useful in your research or applications, please consider giving a star 🌟 to the [official repository](https://github.com/hustvl/SparseInst) and citing SparseInst by the following BibTeX entry.\n\n```BibTeX\n@inproceedings{Cheng2022SparseInst,\n  title     =   {Sparse Instance Activation for Real-Time Instance Segmentation},\n  author    =   {Cheng, Tianheng and Wang, Xinggang and Chen, Shaoyu and Zhang, Wenqiang and Zhang, Qian and Huang, Chang and Zhang, Zhaoxiang and Liu, Wenyu},\n  booktitle =   {Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR)},\n  year      =   {2022}\n}\n\n```\n\n## Checklist\n\n<!-- Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.\nOpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. 
But this project is only eligible to become a part of the core package upon attaining the last milestone.\nNote that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.\nA project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->\n\n- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.\n\n  - [x] Finish the code\n\n    <!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmdet.registry.MODELS` and configurable via a config file. -->\n\n  - [x] Basic docstrings & proper citation\n\n    <!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->\n\n  - [x] Test-time correctness\n\n    <!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->\n\n  - [x] A full README\n\n    <!-- As this template does. -->\n\n- [x] Milestone 2: Indicates a successful model implementation.\n\n  - [x] Training-time correctness\n\n    <!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->\n\n- [ ] Milestone 3: Good to be a part of our core package!\n\n  - [ ] Type hints and docstrings\n\n    <!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/mmdet/datasets/transforms/transforms.py#L41-L169) -->\n\n  - [ ] Unit tests\n\n    <!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/tests/test_datasets/test_transforms/test_transforms.py#L35-L88) -->\n\n  - [ ] Code polishing\n\n    <!-- Refactor your code according to reviewer's comment. -->\n\n  - [ ] Metafile.yml\n\n    <!-- It will be parsed by MIM and Inferencer. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/metafile.yml) -->\n\n- [ ] Move your modules into the core package following the codebase's file hierarchy structure.\n\n  <!-- In particular, you may have to refactor this README into a standard one. 
[Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md) -->\n"
  },
  {
    "path": "projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py",
    "content": "_base_ = [\n    'mmdet::_base_/datasets/coco_instance.py',\n    'mmdet::_base_/schedules/schedule_1x.py',\n    'mmdet::_base_/default_runtime.py'\n]\n\ncustom_imports = dict(\n    imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False)\n\nmodel = dict(\n    type='SparseInst',\n    data_preprocessor=dict(\n        type='DetDataPreprocessor',\n        mean=[123.675, 116.28, 103.53],\n        std=[58.395, 57.12, 57.375],\n        bgr_to_rgb=True,\n        pad_mask=True,\n        pad_size_divisor=32),\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(1, 2, 3),\n        frozen_stages=0,\n        norm_cfg=dict(type='BN', requires_grad=False),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    encoder=dict(\n        type='InstanceContextEncoder',\n        in_channels=[512, 1024, 2048],\n        out_channels=256),\n    decoder=dict(\n        type='BaseIAMDecoder',\n        in_channels=256 + 2,\n        num_classes=80,\n        ins_dim=256,\n        ins_conv=4,\n        mask_dim=256,\n        mask_conv=4,\n        kernel_dim=128,\n        scale_factor=2.0,\n        output_iam=False,\n        num_masks=100),\n    criterion=dict(\n        type='SparseInstCriterion',\n        num_classes=80,\n        assigner=dict(type='SparseInstMatcher', alpha=0.8, beta=0.2),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            alpha=0.25,\n            gamma=2.0,\n            reduction='sum',\n            loss_weight=2.0),\n        loss_obj=dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='mean',\n            loss_weight=1.0),\n        loss_mask=dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='mean',\n            loss_weight=5.0),\n        loss_dice=dict(\n            type='DiceLoss',\n            use_sigmoid=True,\n            reduction='sum',\n            eps=5e-5,\n            loss_weight=2.0),\n    ),\n    test_cfg=dict(score_thr=0.005, mask_thr_binary=0.45))\n\nbackend = 'pillow'\ntrain_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}},\n        imdecode_backend=backend),\n    dict(\n        type='LoadAnnotations',\n        with_bbox=True,\n        with_mask=True,\n        poly2mask=False),\n    dict(\n        type='RandomChoiceResize',\n        scales=[(416, 853), (448, 853), (480, 853), (512, 853), (544, 853),\n                (576, 853), (608, 853), (640, 853)],\n        keep_ratio=True,\n        backend=backend),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PackDetInputs')\n]\n\ntest_pipeline = [\n    dict(\n        type='LoadImageFromFile',\n        file_client_args={{_base_.file_client_args}},\n        imdecode_backend=backend),\n    dict(type='Resize', scale=(640, 853), keep_ratio=True, backend=backend),\n    dict(\n        type='PackDetInputs',\n        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n                   'scale_factor'))\n]\n\ntrain_dataloader = dict(\n    batch_size=8,\n    num_workers=8,\n    sampler=dict(type='InfiniteSampler'),\n    dataset=dict(pipeline=train_pipeline))\ntest_dataloader = dict(dataset=dict(pipeline=test_pipeline))\nval_dataloader = test_dataloader\n\nval_evaluator = dict(metric='segm')\ntest_evaluator = val_evaluator\n\n# optimizer\noptim_wrapper = dict(\n    
type='OptimWrapper',\n    optimizer=dict(_delete_=True, type='AdamW', lr=0.00005, weight_decay=0.05))\ntrain_cfg = dict(\n    _delete_=True,\n    type='IterBasedTrainLoop',\n    max_iters=270000,\n    val_interval=10000)\n# learning rate\nparam_scheduler = [\n    dict(\n        type='MultiStepLR',\n        begin=0,\n        end=270000,\n        by_epoch=False,\n        milestones=[210000, 250000],\n        gamma=0.1)\n]\n\ndefault_hooks = dict(\n    checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=3))\nlog_processor = dict(by_epoch=False)\n\n# NOTE: `auto_scale_lr` is for automatically scaling LR,\n# USER SHOULD NOT CHANGE ITS VALUES.\n# base_batch_size = (8 GPUs) x (8 samples per GPU)\nauto_scale_lr = dict(base_batch_size=64, enable=True)\n"
  },
  {
    "path": "projects/SparseInst/sparseinst/__init__.py",
    "content": "from .decoder import BaseIAMDecoder, GroupIAMDecoder, GroupIAMSoftDecoder\nfrom .encoder import PyramidPoolingModule\nfrom .loss import SparseInstCriterion, SparseInstMatcher\nfrom .sparseinst import SparseInst\n\n__all__ = [\n    'BaseIAMDecoder', 'GroupIAMDecoder', 'GroupIAMSoftDecoder',\n    'PyramidPoolingModule', 'SparseInstCriterion', 'SparseInstMatcher',\n    'SparseInst'\n]\n"
  },
  {
    "path": "projects/SparseInst/sparseinst/decoder.py",
    "content": "# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.model.weight_init import caffe2_xavier_init, kaiming_init\nfrom torch.nn import init\n\nfrom mmdet.registry import MODELS\n\n\ndef _make_stack_3x3_convs(num_convs,\n                          in_channels,\n                          out_channels,\n                          act_cfg=dict(type='ReLU', inplace=True)):\n    convs = []\n    for _ in range(num_convs):\n        convs.append(nn.Conv2d(in_channels, out_channels, 3, padding=1))\n        convs.append(MODELS.build(act_cfg))\n        in_channels = out_channels\n    return nn.Sequential(*convs)\n\n\nclass InstanceBranch(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 dim=256,\n                 num_convs=4,\n                 num_masks=100,\n                 num_classes=80,\n                 kernel_dim=128,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__()\n        num_masks = num_masks\n        self.num_classes = num_classes\n\n        self.inst_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,\n                                                act_cfg)\n        # iam prediction, a simple conv\n        self.iam_conv = nn.Conv2d(dim, num_masks, 3, padding=1)\n\n        # outputs\n        self.cls_score = nn.Linear(dim, self.num_classes)\n        self.mask_kernel = nn.Linear(dim, kernel_dim)\n        self.objectness = nn.Linear(dim, 1)\n\n        self.prior_prob = 0.01\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.inst_convs.modules():\n            if isinstance(m, nn.Conv2d):\n                kaiming_init(m)\n        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)\n        for module in [self.iam_conv, self.cls_score]:\n            init.constant_(module.bias, bias_value)\n        init.normal_(self.iam_conv.weight, std=0.01)\n        init.normal_(self.cls_score.weight, std=0.01)\n\n        init.normal_(self.mask_kernel.weight, std=0.01)\n        init.constant_(self.mask_kernel.bias, 0.0)\n\n    def forward(self, features):\n        # instance features (x4 convs)\n        features = self.inst_convs(features)\n        # predict instance activation maps\n        iam = self.iam_conv(features)\n        iam_prob = iam.sigmoid()\n\n        B, N = iam_prob.shape[:2]\n        C = features.size(1)\n        # BxNxHxW -> BxNx(HW)\n        iam_prob = iam_prob.view(B, N, -1)\n        normalizer = iam_prob.sum(-1).clamp(min=1e-6)\n        iam_prob = iam_prob / normalizer[:, :, None]\n        # aggregate features: BxCxHxW -> Bx(HW)xC\n        inst_features = torch.bmm(iam_prob,\n                                  features.view(B, C, -1).permute(0, 2, 1))\n        # predict classification & segmentation kernel & objectness\n        pred_logits = self.cls_score(inst_features)\n        pred_kernel = self.mask_kernel(inst_features)\n        pred_scores = self.objectness(inst_features)\n        return pred_logits, pred_kernel, pred_scores, iam\n\n\nclass MaskBranch(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 dim=256,\n                 num_convs=4,\n                 kernel_dim=128,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__()\n        self.mask_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,\n                                                
act_cfg)\n        self.projection = nn.Conv2d(dim, kernel_dim, kernel_size=1)\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.mask_convs.modules():\n            if isinstance(m, nn.Conv2d):\n                kaiming_init(m)\n        kaiming_init(self.projection)\n\n    def forward(self, features):\n        # mask features (x4 convs)\n        features = self.mask_convs(features)\n        return self.projection(features)\n\n\n@MODELS.register_module()\nclass BaseIAMDecoder(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 num_classes,\n                 ins_dim=256,\n                 ins_conv=4,\n                 mask_dim=256,\n                 mask_conv=4,\n                 kernel_dim=128,\n                 scale_factor=2.0,\n                 output_iam=False,\n                 num_masks=100,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__()\n        # add 2 for coordinates\n        in_channels = in_channels  # ENCODER.NUM_CHANNELS + 2\n\n        self.scale_factor = scale_factor\n        self.output_iam = output_iam\n\n        self.inst_branch = InstanceBranch(\n            in_channels,\n            dim=ins_dim,\n            num_convs=ins_conv,\n            num_masks=num_masks,\n            num_classes=num_classes,\n            kernel_dim=kernel_dim,\n            act_cfg=act_cfg)\n        self.mask_branch = MaskBranch(\n            in_channels,\n            dim=mask_dim,\n            num_convs=mask_conv,\n            kernel_dim=kernel_dim,\n            act_cfg=act_cfg)\n\n    @torch.no_grad()\n    def compute_coordinates_linspace(self, x):\n        # linspace is not supported in ONNX\n        h, w = x.size(2), x.size(3)\n        y_loc = torch.linspace(-1, 1, h, device=x.device)\n        x_loc = torch.linspace(-1, 1, w, device=x.device)\n        y_loc, x_loc = torch.meshgrid(y_loc, x_loc)\n        y_loc = y_loc.expand([x.shape[0], 1, -1, -1])\n        x_loc = x_loc.expand([x.shape[0], 1, -1, -1])\n        locations = torch.cat([x_loc, y_loc], 1)\n        return locations.to(x)\n\n    @torch.no_grad()\n    def compute_coordinates(self, x):\n        h, w = x.size(2), x.size(3)\n        y_loc = -1.0 + 2.0 * torch.arange(h, device=x.device) / (h - 1)\n        x_loc = -1.0 + 2.0 * torch.arange(w, device=x.device) / (w - 1)\n        y_loc, x_loc = torch.meshgrid(y_loc, x_loc)\n        y_loc = y_loc.expand([x.shape[0], 1, -1, -1])\n        x_loc = x_loc.expand([x.shape[0], 1, -1, -1])\n        locations = torch.cat([x_loc, y_loc], 1)\n        return locations.to(x)\n\n    def forward(self, features):\n        coord_features = self.compute_coordinates(features)\n        features = torch.cat([coord_features, features], dim=1)\n        pred_logits, pred_kernel, pred_scores, iam = self.inst_branch(features)\n        mask_features = self.mask_branch(features)\n\n        N = pred_kernel.shape[1]\n        # mask_features: BxCxHxW\n        B, C, H, W = mask_features.shape\n        pred_masks = torch.bmm(pred_kernel,\n                               mask_features.view(B, C,\n                                                  H * W)).view(B, N, H, W)\n\n        pred_masks = F.interpolate(\n            pred_masks,\n            scale_factor=self.scale_factor,\n            mode='bilinear',\n            align_corners=False)\n\n        output = {\n            'pred_logits': pred_logits,\n            'pred_masks': pred_masks,\n            'pred_scores': pred_scores,\n        }\n\n        if 
self.output_iam:\n            iam = F.interpolate(\n                iam,\n                scale_factor=self.scale_factor,\n                mode='bilinear',\n                align_corners=False)\n            output['pred_iam'] = iam\n\n        return output\n\n\nclass GroupInstanceBranch(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 num_groups=4,\n                 dim=256,\n                 num_convs=4,\n                 num_masks=100,\n                 num_classes=80,\n                 kernel_dim=128,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__()\n        self.num_groups = num_groups\n        self.num_classes = num_classes\n\n        self.inst_convs = _make_stack_3x3_convs(\n            num_convs, in_channels, dim, act_cfg=act_cfg)\n        # iam prediction, a group conv\n        expand_dim = dim * self.num_groups\n        self.iam_conv = nn.Conv2d(\n            dim,\n            num_masks * self.num_groups,\n            3,\n            padding=1,\n            groups=self.num_groups)\n        # outputs\n        self.fc = nn.Linear(expand_dim, expand_dim)\n\n        self.cls_score = nn.Linear(expand_dim, self.num_classes)\n        self.mask_kernel = nn.Linear(expand_dim, kernel_dim)\n        self.objectness = nn.Linear(expand_dim, 1)\n\n        self.prior_prob = 0.01\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.inst_convs.modules():\n            if isinstance(m, nn.Conv2d):\n                kaiming_init(m)\n        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)\n        for module in [self.iam_conv, self.cls_score]:\n            init.constant_(module.bias, bias_value)\n        init.normal_(self.iam_conv.weight, std=0.01)\n        init.normal_(self.cls_score.weight, std=0.01)\n\n        init.normal_(self.mask_kernel.weight, std=0.01)\n        init.constant_(self.mask_kernel.bias, 0.0)\n        caffe2_xavier_init(self.fc)\n\n    def forward(self, features):\n        # instance features (x4 convs)\n        features = self.inst_convs(features)\n        # predict instance activation maps\n        iam = self.iam_conv(features)\n        iam_prob = iam.sigmoid()\n\n        B, N = iam_prob.shape[:2]\n        C = features.size(1)\n        # BxNxHxW -> BxNx(HW)\n        iam_prob = iam_prob.view(B, N, -1)\n        normalizer = iam_prob.sum(-1).clamp(min=1e-6)\n        iam_prob = iam_prob / normalizer[:, :, None]\n\n        # aggregate features: BxCxHxW -> Bx(HW)xC\n        inst_features = torch.bmm(iam_prob,\n                                  features.view(B, C, -1).permute(0, 2, 1))\n\n        inst_features = inst_features.reshape(B, 4, N // self.num_groups,\n                                              -1).transpose(1, 2).reshape(\n                                                  B, N // self.num_groups, -1)\n\n        inst_features = F.relu_(self.fc(inst_features))\n        # predict classification & segmentation kernel & objectness\n        pred_logits = self.cls_score(inst_features)\n        pred_kernel = self.mask_kernel(inst_features)\n        pred_scores = self.objectness(inst_features)\n        return pred_logits, pred_kernel, pred_scores, iam\n\n\n@MODELS.register_module()\nclass GroupIAMDecoder(BaseIAMDecoder):\n\n    def __init__(self,\n                 in_channels,\n                 num_classes,\n                 num_groups=4,\n                 ins_dim=256,\n                 ins_conv=4,\n                 mask_dim=256,\n                 mask_conv=4,\n 
                kernel_dim=128,\n                 scale_factor=2.0,\n                 output_iam=False,\n                 num_masks=100,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__(\n            in_channels=in_channels,\n            num_classes=num_classes,\n            ins_dim=ins_dim,\n            ins_conv=ins_conv,\n            mask_dim=mask_dim,\n            mask_conv=mask_conv,\n            kernel_dim=kernel_dim,\n            scale_factor=scale_factor,\n            output_iam=output_iam,\n            num_masks=num_masks,\n            act_cfg=act_cfg)\n        self.inst_branch = GroupInstanceBranch(\n            in_channels,\n            num_groups=num_groups,\n            dim=ins_dim,\n            num_convs=ins_conv,\n            num_masks=num_masks,\n            num_classes=num_classes,\n            kernel_dim=kernel_dim,\n            act_cfg=act_cfg)\n\n\nclass GroupInstanceSoftBranch(GroupInstanceBranch):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.softmax_bias = nn.Parameter(torch.ones([\n            1,\n        ]))\n\n    def forward(self, features):\n        # instance features (x4 convs)\n        features = self.inst_convs(features)\n        # predict instance activation maps\n        iam = self.iam_conv(features)\n\n        B, N = iam.shape[:2]\n        C = features.size(1)\n        # BxNxHxW -> BxNx(HW)\n        iam_prob = F.softmax(iam.view(B, N, -1) + self.softmax_bias, dim=-1)\n        # aggregate features: BxCxHxW -> Bx(HW)xC\n        inst_features = torch.bmm(iam_prob,\n                                  features.view(B, C, -1).permute(0, 2, 1))\n\n        inst_features = inst_features.reshape(B, self.num_groups,\n                                              N // self.num_groups,\n                                              -1).transpose(1, 2).reshape(\n                                                  B, N // self.num_groups, -1)\n\n        inst_features = F.relu_(self.fc(inst_features))\n        # predict classification & segmentation kernel & objectness\n        pred_logits = self.cls_score(inst_features)\n        pred_kernel = self.mask_kernel(inst_features)\n        pred_scores = self.objectness(inst_features)\n        return pred_logits, pred_kernel, pred_scores, iam\n\n\n@MODELS.register_module()\nclass GroupIAMSoftDecoder(BaseIAMDecoder):\n\n    def __init__(self,\n                 in_channels,\n                 num_classes,\n                 num_groups=4,\n                 ins_dim=256,\n                 ins_conv=4,\n                 mask_dim=256,\n                 mask_conv=4,\n                 kernel_dim=128,\n                 scale_factor=2.0,\n                 output_iam=False,\n                 num_masks=100,\n                 act_cfg=dict(type='ReLU', inplace=True)):\n        super().__init__(\n            in_channels=in_channels,\n            num_classes=num_classes,\n            ins_dim=ins_dim,\n            ins_conv=ins_conv,\n            mask_dim=mask_dim,\n            mask_conv=mask_conv,\n            kernel_dim=kernel_dim,\n            scale_factor=scale_factor,\n            output_iam=output_iam,\n            num_masks=num_masks,\n            act_cfg=act_cfg)\n        self.inst_branch = GroupInstanceSoftBranch(\n            in_channels,\n            num_groups=num_groups,\n            dim=ins_dim,\n            num_convs=ins_conv,\n            num_masks=num_masks,\n            num_classes=num_classes,\n            kernel_dim=kernel_dim,\n            
act_cfg=act_cfg)\n"
  },
  {
    "path": "projects/SparseInst/sparseinst/encoder.py",
    "content": "# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmengine.model.weight_init import caffe2_xavier_init, kaiming_init\n\nfrom mmdet.registry import MODELS\n\n\nclass PyramidPoolingModule(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 channels=512,\n                 sizes=(1, 2, 3, 6),\n                 act_cfg=dict(type='ReLU')):\n        super().__init__()\n        self.stages = []\n        self.stages = nn.ModuleList(\n            [self._make_stage(in_channels, channels, size) for size in sizes])\n        self.bottleneck = nn.Conv2d(in_channels + len(sizes) * channels,\n                                    in_channels, 1)\n        self.act = MODELS.build(act_cfg)\n\n    def _make_stage(self, features, out_features, size):\n        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))\n        conv = nn.Conv2d(features, out_features, 1)\n        return nn.Sequential(prior, conv)\n\n    def forward(self, feats):\n        h, w = feats.size(2), feats.size(3)\n        priors = [\n            F.interpolate(\n                input=self.act(stage(feats)),\n                size=(h, w),\n                mode='bilinear',\n                align_corners=False) for stage in self.stages\n        ] + [feats]\n        out = self.act(self.bottleneck(torch.cat(priors, 1)))\n        return out\n\n\n@MODELS.register_module()\nclass InstanceContextEncoder(nn.Module):\n    \"\"\"\n    Instance Context Encoder\n    1. construct feature pyramids from ResNet\n    2. enlarge receptive fields (ppm)\n    3. multi-scale fusion\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels=256,\n                 with_ppm=True,\n                 act_cfg=dict(type='ReLU')):\n        super().__init__()\n        self.num_channels = out_channels\n        self.in_channels = in_channels\n        self.with_ppm = with_ppm\n        fpn_laterals = []\n        fpn_outputs = []\n        for in_channel in reversed(self.in_channels):\n            lateral_conv = nn.Conv2d(in_channel, self.num_channels, 1)\n            output_conv = nn.Conv2d(\n                self.num_channels, self.num_channels, 3, padding=1)\n            caffe2_xavier_init(lateral_conv)\n            caffe2_xavier_init(output_conv)\n            fpn_laterals.append(lateral_conv)\n            fpn_outputs.append(output_conv)\n        self.fpn_laterals = nn.ModuleList(fpn_laterals)\n        self.fpn_outputs = nn.ModuleList(fpn_outputs)\n        # ppm\n        if self.with_ppm:\n            self.ppm = PyramidPoolingModule(\n                self.num_channels, self.num_channels // 4, act_cfg=act_cfg)\n        # final fusion\n        self.fusion = nn.Conv2d(self.num_channels * 3, self.num_channels, 1)\n        kaiming_init(self.fusion)\n\n    def forward(self, features):\n        features = features[::-1]\n        prev_features = self.fpn_laterals[0](features[0])\n        if self.with_ppm:\n            prev_features = self.ppm(prev_features)\n        outputs = [self.fpn_outputs[0](prev_features)]\n        for feature, lat_conv, output_conv in zip(features[1:],\n                                                  self.fpn_laterals[1:],\n                                                  self.fpn_outputs[1:]):\n            lat_features = lat_conv(feature)\n            top_down_features = F.interpolate(\n                prev_features, scale_factor=2.0, mode='nearest')\n            
prev_features = lat_features + top_down_features\n            outputs.insert(0, output_conv(prev_features))\n        size = outputs[0].shape[2:]\n        features = [outputs[0]] + [\n            F.interpolate(x, size, mode='bilinear', align_corners=False)\n            for x in outputs[1:]\n        ]\n        features = self.fusion(torch.cat(features, dim=1))\n        return features\n"
  },
  {
    "path": "projects/SparseInst/sparseinst/loss.py",
    "content": "# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.optimize import linear_sum_assignment\nfrom torch.cuda.amp import autocast\n\nfrom mmdet.registry import MODELS, TASK_UTILS\nfrom mmdet.utils import reduce_mean\n\n\ndef compute_mask_iou(inputs, targets):\n    inputs = inputs.sigmoid()\n    # thresholding\n    binarized_inputs = (inputs >= 0.4).float()\n    targets = (targets > 0.5).float()\n    intersection = (binarized_inputs * targets).sum(-1)\n    union = targets.sum(-1) + binarized_inputs.sum(-1) - intersection\n    score = intersection / (union + 1e-6)\n    return score\n\n\ndef dice_score(inputs, targets):\n    inputs = inputs.sigmoid()\n    numerator = 2 * torch.matmul(inputs, targets.t())\n    denominator = (inputs * inputs).sum(-1)[:,\n                                            None] + (targets * targets).sum(-1)\n    score = numerator / (denominator + 1e-4)\n    return score\n\n\n@MODELS.register_module()\nclass SparseInstCriterion(nn.Module):\n    \"\"\"This part is partially derivated from:\n\n    https://github.com/facebookresearch/detr/blob/main/models/detr.py.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes,\n        assigner,\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            alpha=0.25,\n            gamma=2.0,\n            reduction='sum',\n            loss_weight=2.0),\n        loss_obj=dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='mean',\n            loss_weight=1.0),\n        loss_mask=dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=True,\n            reduction='mean',\n            loss_weight=5.0),\n        loss_dice=dict(\n            type='DiceLoss',\n            use_sigmoid=True,\n            reduction='sum',\n            eps=5e-5,\n            loss_weight=2.0),\n    ):\n        super().__init__()\n        self.matcher = TASK_UTILS.build(assigner)\n        self.num_classes = num_classes\n        self.loss_cls = MODELS.build(loss_cls)\n        self.loss_obj = MODELS.build(loss_obj)\n        self.loss_mask = MODELS.build(loss_mask)\n        self.loss_dice = MODELS.build(loss_dice)\n\n    def _get_src_permutation_idx(self, indices):\n        # permute predictions following indices\n        batch_idx = torch.cat(\n            [torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n        src_idx = torch.cat([src for (src, _) in indices])\n        return batch_idx, src_idx\n\n    def _get_tgt_permutation_idx(self, indices):\n        # permute targets following indices\n        batch_idx = torch.cat(\n            [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n        tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n        return batch_idx, tgt_idx\n\n    def loss_classification(self, outputs, batch_gt_instances, indices,\n                            num_instances):\n        assert 'pred_logits' in outputs\n        src_logits = outputs['pred_logits']\n        idx = self._get_src_permutation_idx(indices)\n        target_classes_o = torch.cat(\n            [gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)])\n        target_classes = torch.full(\n            src_logits.shape[:2],\n            self.num_classes,\n            dtype=torch.int64,\n            device=src_logits.device)\n        target_classes[idx] = target_classes_o\n\n        src_logits = 
src_logits.flatten(0, 1)\n        target_classes = target_classes.flatten(0, 1)\n        # comp focal loss.\n        class_loss = self.loss_cls(\n            src_logits,\n            target_classes,\n        ) / num_instances\n        return class_loss\n\n    def loss_masks_with_iou_objectness(self, outputs, batch_gt_instances,\n                                       indices, num_instances):\n        src_idx = self._get_src_permutation_idx(indices)\n        tgt_idx = self._get_tgt_permutation_idx(indices)\n        # Bx100xHxW\n        assert 'pred_masks' in outputs\n        assert 'pred_scores' in outputs\n        src_iou_scores = outputs['pred_scores']\n        src_masks = outputs['pred_masks']\n        with torch.no_grad():\n            target_masks = torch.cat([\n                gt.masks.to_tensor(\n                    dtype=src_masks.dtype, device=src_masks.device)\n                for gt in batch_gt_instances\n            ])\n        num_masks = [len(gt.masks) for gt in batch_gt_instances]\n        target_masks = target_masks.to(src_masks)\n        if len(target_masks) == 0:\n\n            loss_dice = src_masks.sum() * 0.0\n            loss_mask = src_masks.sum() * 0.0\n            loss_objectness = src_iou_scores.sum() * 0.0\n\n            return loss_objectness, loss_dice, loss_mask\n\n        src_masks = src_masks[src_idx]\n        target_masks = F.interpolate(\n            target_masks[:, None],\n            size=src_masks.shape[-2:],\n            mode='bilinear',\n            align_corners=False).squeeze(1)\n\n        src_masks = src_masks.flatten(1)\n        # FIXME: tgt_idx\n        mix_tgt_idx = torch.zeros_like(tgt_idx[1])\n        cum_sum = 0\n        for num_mask in num_masks:\n            mix_tgt_idx[cum_sum:cum_sum + num_mask] = cum_sum\n            cum_sum += num_mask\n        mix_tgt_idx += tgt_idx[1]\n\n        target_masks = target_masks[mix_tgt_idx].flatten(1)\n\n        with torch.no_grad():\n            ious = compute_mask_iou(src_masks, target_masks)\n\n        tgt_iou_scores = ious\n        src_iou_scores = src_iou_scores[src_idx]\n        tgt_iou_scores = tgt_iou_scores.flatten(0)\n        src_iou_scores = src_iou_scores.flatten(0)\n\n        loss_objectness = self.loss_obj(src_iou_scores, tgt_iou_scores)\n        loss_dice = self.loss_dice(src_masks, target_masks) / num_instances\n        loss_mask = self.loss_mask(src_masks, target_masks)\n\n        return loss_objectness, loss_dice, loss_mask\n\n    def forward(self, outputs, batch_gt_instances, batch_img_metas,\n                batch_gt_instances_ignore):\n        # Retrieve the matching between the outputs of\n        # the last layer and the targets\n        indices = self.matcher(outputs, batch_gt_instances)\n        # Compute the average number of target boxes\n        # across all nodes, for normalization purposes\n        num_instances = sum(gt.labels.shape[0] for gt in batch_gt_instances)\n        num_instances = torch.as_tensor([num_instances],\n                                        dtype=torch.float,\n                                        device=next(iter(\n                                            outputs.values())).device)\n        num_instances = reduce_mean(num_instances).clamp_(min=1).item()\n        # Compute all the requested losses\n        loss_cls = self.loss_classification(outputs, batch_gt_instances,\n                                            indices, num_instances)\n        loss_obj, loss_dice, loss_mask = self.loss_masks_with_iou_objectness(\n            outputs, 
batch_gt_instances, indices, num_instances)\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_obj=loss_obj,\n            loss_dice=loss_dice,\n            loss_mask=loss_mask)\n\n\n@TASK_UTILS.register_module()\nclass SparseInstMatcher(nn.Module):\n\n    def __init__(self, alpha=0.8, beta=0.2):\n        super().__init__()\n        self.alpha = alpha\n        self.beta = beta\n        self.mask_score = dice_score\n\n    def forward(self, outputs, batch_gt_instances):\n        with torch.no_grad():\n            B, N, H, W = outputs['pred_masks'].shape\n            pred_masks = outputs['pred_masks']\n            pred_logits = outputs['pred_logits'].sigmoid()\n            device = pred_masks.device\n\n            tgt_ids = torch.cat([gt.labels for gt in batch_gt_instances])\n\n            if tgt_ids.shape[0] == 0:\n                return [(torch.as_tensor([]).to(pred_logits),\n                         torch.as_tensor([]).to(pred_logits))] * B\n            tgt_masks = torch.cat([\n                gt.masks.to_tensor(dtype=pred_masks.dtype, device=device)\n                for gt in batch_gt_instances\n            ])\n\n            tgt_masks = F.interpolate(\n                tgt_masks[:, None],\n                size=pred_masks.shape[-2:],\n                mode='bilinear',\n                align_corners=False).squeeze(1)\n\n            pred_masks = pred_masks.view(B * N, -1)\n            tgt_masks = tgt_masks.flatten(1)\n            with autocast(enabled=False):\n                pred_masks = pred_masks.float()\n                tgt_masks = tgt_masks.float()\n                pred_logits = pred_logits.float()\n                mask_score = self.mask_score(pred_masks, tgt_masks)\n                # Nx(Number of gts)\n                matching_prob = pred_logits.view(B * N, -1)[:, tgt_ids]\n                C = (mask_score**self.alpha) * (matching_prob**self.beta)\n\n            C = C.view(B, N, -1).cpu()\n            # hungarian matching\n            sizes = [len(gt.masks) for gt in batch_gt_instances]\n            indices = [\n                linear_sum_assignment(c[i], maximize=True)\n                for i, c in enumerate(C.split(sizes, -1))\n            ]\n            indices = [(torch.as_tensor(i, dtype=torch.int64),\n                        torch.as_tensor(j, dtype=torch.int64))\n                       for i, j in indices]\n            return indices\n"
  },
  {
    "path": "projects/SparseInst/sparseinst/sparseinst.py",
    "content": "# Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved\nfrom typing import List, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.structures import InstanceData\nfrom torch import Tensor\n\nfrom mmdet.models import BaseDetector\nfrom mmdet.models.utils import unpack_gt_instances\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import OptSampleList, SampleList\nfrom mmdet.utils import ConfigType, OptConfigType\n\n\n@torch.jit.script\ndef rescoring_mask(scores, mask_pred, masks):\n    mask_pred_ = mask_pred.float()\n    return scores * ((masks * mask_pred_).sum([1, 2]) /\n                     (mask_pred_.sum([1, 2]) + 1e-6))\n\n\n@MODELS.register_module()\nclass SparseInst(BaseDetector):\n    \"\"\"Implementation of `SparseInst <https://arxiv.org/abs/1912.02424>`_\n\n    Args:\n        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n            :class:`DetDataPreprocessor` to process the input data.\n            Defaults to None.\n        backbone (:obj:`ConfigDict` or dict): The backbone module.\n        encoder (:obj:`ConfigDict` or dict): The encoder module.\n        decoder (:obj:`ConfigDict` or dict): The decoder module.\n        criterion (:obj:`ConfigDict` or dict, optional): The training matcher\n            and losses. Defaults to None.\n        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config\n            of SparseInst. Defaults to None.\n        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control\n            the initialization. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 data_preprocessor: ConfigType,\n                 backbone: ConfigType,\n                 encoder: ConfigType,\n                 decoder: ConfigType,\n                 criterion: OptConfigType = None,\n                 test_cfg: OptConfigType = None,\n                 init_cfg: OptConfigType = None):\n        super().__init__(\n            data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n\n        # backbone\n        self.backbone = MODELS.build(backbone)\n        # encoder & decoder\n        self.encoder = MODELS.build(encoder)\n        self.decoder = MODELS.build(decoder)\n\n        # matcher & loss (matcher is built in loss)\n        self.criterion = MODELS.build(criterion)\n\n        # inference\n        self.cls_threshold = test_cfg.score_thr\n        self.mask_threshold = test_cfg.mask_thr_binary\n\n    def _forward(\n            self,\n            batch_inputs: Tensor,\n            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:\n        \"\"\"Network forward process. Usually includes backbone, neck and head\n        forward without any post-processing.\n\n         Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n\n        Returns:\n            tuple[list]: A tuple of features from ``bbox_head`` forward.\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        x = self.encoder(x)\n        results = self.decoder(x)\n        return results\n\n    def predict(self,\n                batch_inputs: Tensor,\n                batch_data_samples: SampleList,\n                rescale: bool = True) -> SampleList:\n        \"\"\"Predict results from a batch of inputs and data samples with post-\n        processing.\n\n        Args:\n            batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n            batch_data_samples (List[:obj:`DetDataSample`]): The Data\n                Samples. 
It usually includes information such as\n                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n            rescale (bool): Whether to rescale the results.\n                Defaults to True.\n\n        Returns:\n            list[:obj:`DetDataSample`]: Detection results of the\n            input images. Each DetDataSample usually contain\n            'pred_instances'. And the ``pred_instances`` usually\n            contains following keys.\n\n                - scores (Tensor): Classification scores, has a shape\n                    (num_instance, )\n                - labels (Tensor): Labels of bboxes, has a shape\n                    (num_instances, ).\n                - bboxes (Tensor): Has a shape (num_instances, 4),\n                    the last dimension 4 arrange as (x1, y1, x2, y2).\n        \"\"\"\n        max_shape = batch_inputs.shape[-2:]\n        output = self._forward(batch_inputs)\n\n        pred_scores = output['pred_logits'].sigmoid()\n        pred_masks = output['pred_masks'].sigmoid()\n        pred_objectness = output['pred_scores'].sigmoid()\n        pred_scores = torch.sqrt(pred_scores * pred_objectness)\n\n        results_list = []\n        for batch_idx, (scores_per_image, mask_pred_per_image,\n                        datasample) in enumerate(\n                            zip(pred_scores, pred_masks, batch_data_samples)):\n            result = InstanceData()\n            # max/argmax\n            scores, labels = scores_per_image.max(dim=-1)\n            # cls threshold\n            keep = scores > self.cls_threshold\n            scores = scores[keep]\n            labels = labels[keep]\n            mask_pred_per_image = mask_pred_per_image[keep]\n\n            if scores.size(0) == 0:\n                result.scores = scores\n                result.labels = labels\n                results_list.append(result)\n                continue\n\n            img_meta = datasample.metainfo\n            # rescoring mask using maskness\n            scores = rescoring_mask(scores,\n                                    mask_pred_per_image > self.mask_threshold,\n                                    mask_pred_per_image)\n            h, w = img_meta['img_shape'][:2]\n            mask_pred_per_image = F.interpolate(\n                mask_pred_per_image.unsqueeze(1),\n                size=max_shape,\n                mode='bilinear',\n                align_corners=False)[:, :, :h, :w]\n\n            if rescale:\n                ori_h, ori_w = img_meta['ori_shape'][:2]\n                mask_pred_per_image = F.interpolate(\n                    mask_pred_per_image,\n                    size=(ori_h, ori_w),\n                    mode='bilinear',\n                    align_corners=False).squeeze(1)\n\n            mask_pred = mask_pred_per_image > self.mask_threshold\n            result.masks = mask_pred\n            result.scores = scores\n            result.labels = labels\n            # create an empty bbox in InstanceData to avoid bugs when\n            # calculating metrics.\n            result.bboxes = result.scores.new_zeros(len(scores), 4)\n            results_list.append(result)\n\n        batch_data_samples = self.add_pred_to_datasample(\n            batch_data_samples, results_list)\n        return batch_data_samples\n\n    def loss(self, batch_inputs: Tensor,\n             batch_data_samples: SampleList) -> Union[dict, list]:\n        \"\"\"Calculate losses from a batch of inputs and data samples.\n\n        Args:\n            batch_inputs (Tensor): Input images of shape (N, 
C, H, W).\n                These should usually be mean centered and std scaled.\n            batch_data_samples (list[:obj:`DetDataSample`]): The batch\n                data samples. It usually includes information such\n                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n        Returns:\n            dict: A dictionary of loss components.\n        \"\"\"\n        outs = self._forward(batch_inputs)\n        (batch_gt_instances, batch_gt_instances_ignore,\n         batch_img_metas) = unpack_gt_instances(batch_data_samples)\n\n        losses = self.criterion(outs, batch_gt_instances, batch_img_metas,\n                                batch_gt_instances_ignore)\n        return losses\n\n    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n        \"\"\"Extract features.\n\n        Args:\n            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n        Returns:\n            tuple[Tensor]: Multi-level features that may have\n            different resolutions.\n        \"\"\"\n        x = self.backbone(batch_inputs)\n        x = self.encoder(x)\n        return x\n"
  },
  {
    "path": "projects/example_project/README.md",
    "content": "# Dummy ResNet Wrapper\n\nThis is an example README for community `projects/`. We have provided detailed explanations for each field in the form of html comments, which are visible when you read the source of this README file. If you wish to submit your project to our main repository, then all the fields in this README are mandatory for others to understand what you have achieved in this implementation. For more details, read our [contribution guide](https://mmdetection.readthedocs.io/en/3.x/notes/contribution_guide.html) or approach us in [Discussions](https://github.com/open-mmlab/mmdetection/discussions).\n\n## Description\n\n<!-- Share any information you would like others to know. For example:\nAuthor: @xxx.\nThis is an implementation of \\[XXX\\]. -->\n\nThis project implements a dummy ResNet wrapper, which literally does nothing new but prints \"hello world\" during initialization.\n\n## Usage\n\n<!-- For a typical model, this section should contain the commands for training and testing. You are also suggested to dump your environment specification to env.yml by `conda env export > env.yml`. -->\n\n### Training commands\n\nIn MMDetection's root directory, run the following command to train the model:\n\n```bash\npython tools/train.py projects/example_project/configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py\n```\n\nFor multi-gpu training, run:\n\n```bash\npython -m torch.distributed.launch --nnodes=1 --node_rank=0 --nproc_per_node=${NUM_GPUS} --master_port=29506 --master_addr=\"127.0.0.1\" tools/train.py projects/example_project/configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py\n```\n\n### Testing commands\n\nIn MMDetection's root directory, run the following command to test the model:\n\n```bash\npython tools/test.py projects/example_project/configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py ${CHECKPOINT_PATH}\n```\n\n## Results\n\n<!-- List the results as usually done in other model's README. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md#results-and-models)\nYou should claim whether this is based on the pre-trained weights, which are converted from the official release; or it's a reproduced result obtained from retraining the model in this project. -->\n\n|                                Method                                 |  Backbone   | Pretrained Model |  Training set  |   Test set   | #epoch | box AP |         Download         |\n| :-------------------------------------------------------------------: | :---------: | :--------------: | :------------: | :----------: | :----: | :----: | :----------------------: |\n| [Faster R-CNN dummy](configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py) | DummyResNet |        -         | COCO2017 Train | COCO2017 Val |   12   | 0.8853 | [model](<>) \\| [log](<>) |\n\n## Citation\n\n<!-- You may remove this section if not applicable. -->\n\n```latex\n@article{Ren_2017,\n   title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks},\n   journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},\n   publisher={Institute of Electrical and Electronics Engineers (IEEE)},\n   author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian},\n   year={2017},\n   month={Jun},\n}\n```\n\n## Checklist\n\n<!-- Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. 
The PIC (person in charge) or contributors of this project should check all the items that they believe have been finished, which will further be verified by codebase maintainers via a PR.\nOpenMMLab's maintainer will review the code to ensure the project's quality. Reaching the first milestone means that this project suffices the minimum requirement of being merged into 'projects/'. But this project is only eligible to become a part of the core package upon attaining the last milestone.\nNote that keeping this section up-to-date is crucial not only for this project's developers but the entire community, since there might be some other contributors joining this project and deciding their starting point from this list. It also helps maintainers accurately estimate time and effort on further code polishing, if needed.\nA project does not necessarily have to be finished in a single PR, but it's essential for the project to at least reach the first milestone in its very first PR. -->\n\n- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`.\n\n  - [ ] Finish the code\n\n    <!-- The code's design shall follow existing interfaces and convention. For example, each model component should be registered into `mmdet.registry.MODELS` and configurable via a config file. -->\n\n  - [ ] Basic docstrings & proper citation\n\n    <!-- Each major object should contain a docstring, describing its functionality and arguments. If you have adapted the code from other open-source projects, don't forget to cite the source project in docstring and make sure your behavior is not against its license. Typically, we do not accept any code snippet under GPL license. [A Short Guide to Open Source Licenses](https://medium.com/nationwide-technology/a-short-guide-to-open-source-licenses-cf5b1c329edd) -->\n\n  - [ ] Test-time correctness\n\n    <!-- If you are reproducing the result from a paper, make sure your model's inference-time performance matches that in the original paper. The weights usually could be obtained by simply renaming the keys in the official pre-trained weights. This test could be skipped though, if you are able to prove the training-time correctness and check the second milestone. -->\n\n  - [ ] A full README\n\n    <!-- As this template does. -->\n\n- [ ] Milestone 2: Indicates a successful model implementation.\n\n  - [ ] Training-time correctness\n\n    <!-- If you are reproducing the result from a paper, checking this item means that you should have trained your model from scratch based on the original paper's specification and verified that the final result matches the report within a minor error range. -->\n\n- [ ] Milestone 3: Good to be a part of our core package!\n\n  - [ ] Type hints and docstrings\n\n    <!-- Ideally *all* the methods should have [type hints](https://www.pythontutorial.net/python-basics/python-type-hints/) and [docstrings](https://google.github.io/styleguide/pyguide.html#381-docstrings). [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/mmdet/datasets/transforms/transforms.py#L41-L169) -->\n\n  - [ ] Unit tests\n\n    <!-- Unit tests for each module are required. [Example](https://github.com/open-mmlab/mmdetection/blob/5b0d5b40d5c6cfda906db7464ca22cbd4396728a/tests/test_datasets/test_transforms/test_transforms.py#L35-L88) -->\n\n  - [ ] Code polishing\n\n    <!-- Refactor your code according to reviewer's comment. -->\n\n  - [ ] Metafile.yml\n\n    <!-- It will be parsed by MIM and Inferencer. 
[Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/metafile.yml) -->\n\n- [ ] Move your modules into the core package following the codebase's file hierarchy structure.\n\n  <!-- In particular, you may have to refactor this README into a standard one. [Example](https://github.com/open-mmlab/mmdetection/blob/3.x/configs/faster_rcnn/README.md) -->\n"
  },
  {
    "path": "projects/example_project/configs/faster-rcnn_dummy-resnet_fpn_1x_coco.py",
    "content": "_base_ = ['../../../configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py']\n\ncustom_imports = dict(imports=['projects.example_project.dummy'])\n\n_base_.model.backbone.type = 'DummyResNet'\n"
  },
  {
    "path": "projects/example_project/dummy/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .dummy_resnet import DummyResNet\n\n__all__ = ['DummyResNet']\n"
  },
  {
    "path": "projects/example_project/dummy/dummy_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.backbones import ResNet\nfrom mmdet.registry import MODELS\n\n\n@MODELS.register_module()\nclass DummyResNet(ResNet):\n    \"\"\"Implements a dummy ResNet wrapper for demonstration purpose.\n    Args:\n        **kwargs: All the arguments are passed to the parent class.\n    \"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        print('Hello world!')\n        super().__init__(**kwargs)\n"
  },
  {
    "path": "pytest.ini",
    "content": "[pytest]\naddopts = --xdoctest --xdoctest-style=auto\nnorecursedirs = .git ignore build __pycache__ data docker docs .eggs\n\nfilterwarnings= default\n                ignore:.*No cfgstr given in Cacher constructor or call.*:Warning\n                ignore:.*Define the __nice__ method for.*:Warning\n"
  },
  {
    "path": "requirements/albu.txt",
    "content": "albumentations>=0.3.2 --no-binary qudida,albumentations\n"
  },
  {
    "path": "requirements/build.txt",
    "content": "# These must be installed before building mmdetection\ncython\nnumpy\n"
  },
  {
    "path": "requirements/docs.txt",
    "content": "docutils==0.16.0\nmyst-parser\n-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme\nsphinx==4.0.2\nsphinx-copybutton\nsphinx_markdown_tables\nsphinx_rtd_theme==0.5.2\n"
  },
  {
    "path": "requirements/mminstall.txt",
    "content": "mmcv>=2.0.0rc4,<2.1.0\nmmengine>=0.4.0,<1.0.0\n"
  },
  {
    "path": "requirements/optional.txt",
    "content": "cityscapesscripts\nimagecorruptions\nscikit-learn\n"
  },
  {
    "path": "requirements/readthedocs.txt",
    "content": "mmcv>=2.0.0rc1,<2.1.0\nmmengine>=0.1.0,<1.0.0\nscipy\ntorch\ntorchvision\n"
  },
  {
    "path": "requirements/runtime.txt",
    "content": "matplotlib\nnumpy\npycocotools\nscipy\nsix\nterminaltables\n"
  },
  {
    "path": "requirements/tests.txt",
    "content": "asynctest\ncityscapesscripts\ncodecov\nflake8\nimagecorruptions\ninstaboostfast\ninterrogate\nisort==4.3.21\n# Note: used for kwarray.group_items, this may be ported to mmcv in the future.\nkwarray\nmemory_profiler\n-e git+https://github.com/open-mmlab/mmtracking@dev-1.x#egg=mmtrack\nonnx==1.7.0\nonnxruntime>=1.8.0\nparameterized\nprotobuf<=3.20.1\npsutil\npytest\nubelt\nxdoctest>=0.10.0\nyapf\n"
  },
  {
    "path": "requirements.txt",
    "content": "-r requirements/build.txt\n-r requirements/optional.txt\n-r requirements/runtime.txt\n"
  },
  {
    "path": "setup.cfg",
    "content": "[isort]\nline_length = 79\nmulti_line_output = 0\nextra_standard_library = setuptools\nknown_first_party = mmdet\nknown_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,mmengine,numpy,onnx,onnxruntime,pycocotools,parameterized,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml\nno_lines_before = STDLIB,LOCALFOLDER\ndefault_section = THIRDPARTY\n\n[yapf]\nBASED_ON_STYLE = pep8\nBLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true\nSPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true\n\n# ignore-words-list needs to be lowercase format. For example, if we want to\n# ignore word \"BA\", then we need to append \"ba\" to ignore-words-list rather\n# than \"BA\"\n[codespell]\nskip = *.ipynb\nquiet-level = 3\nignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba,warmup,nam,DOTA,dota\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\n# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport os.path as osp\nimport platform\nimport shutil\nimport sys\nimport warnings\nfrom setuptools import find_packages, setup\n\nimport torch\nfrom torch.utils.cpp_extension import (BuildExtension, CppExtension,\n                                       CUDAExtension)\n\n\ndef readme():\n    with open('README.md', encoding='utf-8') as f:\n        content = f.read()\n    return content\n\n\nversion_file = 'mmdet/version.py'\n\n\ndef get_version():\n    with open(version_file, 'r') as f:\n        exec(compile(f.read(), version_file, 'exec'))\n    return locals()['__version__']\n\n\ndef make_cuda_ext(name, module, sources, sources_cuda=[]):\n\n    define_macros = []\n    extra_compile_args = {'cxx': []}\n\n    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':\n        define_macros += [('WITH_CUDA', None)]\n        extension = CUDAExtension\n        extra_compile_args['nvcc'] = [\n            '-D__CUDA_NO_HALF_OPERATORS__',\n            '-D__CUDA_NO_HALF_CONVERSIONS__',\n            '-D__CUDA_NO_HALF2_OPERATORS__',\n        ]\n        sources += sources_cuda\n    else:\n        print(f'Compiling {name} without CUDA')\n        extension = CppExtension\n\n    return extension(\n        name=f'{module}.{name}',\n        sources=[os.path.join(*module.split('.'), p) for p in sources],\n        define_macros=define_macros,\n        extra_compile_args=extra_compile_args)\n\n\ndef parse_requirements(fname='requirements.txt', with_version=True):\n    \"\"\"Parse the package dependencies listed in a requirements file but strips\n    specific versioning information.\n\n    Args:\n        fname (str): path to requirements file\n        with_version (bool, default=False): if True include version specs\n\n    Returns:\n        List[str]: list of requirements items\n\n    CommandLine:\n        python -c \"import setup; print(setup.parse_requirements())\"\n    \"\"\"\n    import re\n    import sys\n    from os.path import exists\n    require_fpath = fname\n\n    def parse_line(line):\n        \"\"\"Parse information from a line in a requirements text file.\"\"\"\n        if line.startswith('-r '):\n            # Allow specifying requirements in other files\n            target = line.split(' ')[1]\n            for info in parse_require_file(target):\n                yield info\n        else:\n            info = {'line': line}\n            if line.startswith('-e '):\n                info['package'] = line.split('#egg=')[1]\n            elif '@git+' in line:\n                info['package'] = line\n            else:\n                # Remove versioning from the package\n                pat = '(' + '|'.join(['>=', '==', '>']) + ')'\n                parts = re.split(pat, line, maxsplit=1)\n                parts = [p.strip() for p in parts]\n\n                info['package'] = parts[0]\n                if len(parts) > 1:\n                    op, rest = parts[1:]\n                    if ';' in rest:\n                        # Handle platform specific dependencies\n                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies\n                        version, platform_deps = map(str.strip,\n                                                     rest.split(';'))\n                        info['platform_deps'] = platform_deps\n                    else:\n                        version = rest  # NOQA\n                    info['version'] = (op, 
version)\n            yield info\n\n    def parse_require_file(fpath):\n        with open(fpath, 'r') as f:\n            for line in f.readlines():\n                line = line.strip()\n                if line and not line.startswith('#'):\n                    for info in parse_line(line):\n                        yield info\n\n    def gen_packages_items():\n        if exists(require_fpath):\n            for info in parse_require_file(require_fpath):\n                parts = [info['package']]\n                if with_version and 'version' in info:\n                    parts.extend(info['version'])\n                if not sys.version.startswith('3.4'):\n                    # apparently package_deps are broken in 3.4\n                    platform_deps = info.get('platform_deps')\n                    if platform_deps is not None:\n                        parts.append(';' + platform_deps)\n                item = ''.join(parts)\n                yield item\n\n    packages = list(gen_packages_items())\n    return packages\n\n\ndef add_mim_extension():\n    \"\"\"Add extra files that are required to support MIM into the package.\n\n    These files will be added by creating a symlink to the originals if the\n    package is installed in `editable` mode (e.g. pip install -e .), or by\n    copying from the originals otherwise.\n    \"\"\"\n\n    # parse installment mode\n    if 'develop' in sys.argv:\n        # installed by `pip install -e .`\n        if platform.system() == 'Windows':\n            # set `copy` mode here since symlink fails on Windows.\n            mode = 'copy'\n        else:\n            mode = 'symlink'\n    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:\n        # installed by `pip install .`\n        # or create source distribution by `python setup.py sdist`\n        mode = 'copy'\n    else:\n        return\n\n    filenames = ['tools', 'configs', 'demo', 'model-index.yml']\n    repo_path = osp.dirname(__file__)\n    mim_path = osp.join(repo_path, 'mmdet', '.mim')\n    os.makedirs(mim_path, exist_ok=True)\n\n    for filename in filenames:\n        if osp.exists(filename):\n            src_path = osp.join(repo_path, filename)\n            tar_path = osp.join(mim_path, filename)\n\n            if osp.isfile(tar_path) or osp.islink(tar_path):\n                os.remove(tar_path)\n            elif osp.isdir(tar_path):\n                shutil.rmtree(tar_path)\n\n            if mode == 'symlink':\n                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))\n                os.symlink(src_relpath, tar_path)\n            elif mode == 'copy':\n                if osp.isfile(src_path):\n                    shutil.copyfile(src_path, tar_path)\n                elif osp.isdir(src_path):\n                    shutil.copytree(src_path, tar_path)\n                else:\n                    warnings.warn(f'Cannot copy file {src_path}.')\n            else:\n                raise ValueError(f'Invalid mode {mode}')\n\n\nif __name__ == '__main__':\n    add_mim_extension()\n    setup(\n        name='mmdet',\n        version=get_version(),\n        description='OpenMMLab Detection Toolbox and Benchmark',\n        long_description=readme(),\n        long_description_content_type='text/markdown',\n        author='MMDetection Contributors',\n        author_email='openmmlab@gmail.com',\n        keywords='computer vision, object detection',\n        url='https://github.com/open-mmlab/mmdetection',\n        packages=find_packages(exclude=('configs', 'tools', 'demo')),\n        
include_package_data=True,\n        classifiers=[\n            'Development Status :: 5 - Production/Stable',\n            'License :: OSI Approved :: Apache Software License',\n            'Operating System :: OS Independent',\n            'Programming Language :: Python :: 3',\n            'Programming Language :: Python :: 3.7',\n            'Programming Language :: Python :: 3.8',\n            'Programming Language :: Python :: 3.9',\n        ],\n        license='Apache License 2.0',\n        install_requires=parse_requirements('requirements/runtime.txt'),\n        extras_require={\n            'all': parse_requirements('requirements.txt'),\n            'tests': parse_requirements('requirements/tests.txt'),\n            'build': parse_requirements('requirements/build.txt'),\n            'optional': parse_requirements('requirements/optional.txt'),\n            'mim': parse_requirements('requirements/mminstall.txt'),\n        },\n        ext_modules=[],\n        cmdclass={'build_ext': BuildExtension},\n        zip_safe=False)\n"
  },
  {
    "path": "tests/test_apis/test_det_inferencer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport tempfile\nfrom unittest import TestCase, mock\nfrom unittest.mock import Mock, patch\n\nimport mmcv\nimport mmengine\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.utils import is_list_of\nfrom parameterized import parameterized\n\nfrom mmdet.apis import DetInferencer\nfrom mmdet.evaluation.functional import get_classes\nfrom mmdet.structures import DetDataSample\n\n\nclass TestDetInferencer(TestCase):\n\n    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)\n    def test_init(self, mock):\n        # init from metafile\n        DetInferencer('rtmdet-t')\n        # init from cfg\n        DetInferencer('configs/yolox/yolox_tiny_8xb8-300e_coco.py')\n\n    def assert_predictions_equal(self, preds1, preds2):\n        for pred1, pred2 in zip(preds1, preds2):\n            if 'bboxes' in pred1:\n                self.assertTrue(\n                    np.allclose(pred1['bboxes'], pred2['bboxes'], 0.1))\n            if 'scores' in pred1:\n                self.assertTrue(\n                    np.allclose(pred1['scores'], pred2['scores'], 0.1))\n            if 'labels' in pred1:\n                self.assertTrue(np.allclose(pred1['labels'], pred2['labels']))\n            if 'panoptic_seg_path' in pred1:\n                self.assertTrue(\n                    pred1['panoptic_seg_path'] == pred2['panoptic_seg_path'])\n\n    @parameterized.expand([\n        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'\n    ])\n    def test_call(self, model):\n        # single img\n        img_path = 'tests/data/color.jpg'\n\n        mock_load = Mock(return_value=None)\n        with patch('mmengine.infer.infer._load_checkpoint', mock_load):\n            inferencer = DetInferencer(model)\n\n        # In the case of not loading the pretrained weight, the category\n        # defaults to COCO 80, so it needs to be replaced.\n        if model == 'panoptic_fpn_r50_fpn_1x_coco':\n            inferencer.visualizer.dataset_meta = {\n                'classes': get_classes('coco_panoptic'),\n                'palette': 'random'\n            }\n\n        res_path = inferencer(img_path, return_vis=True)\n        # ndarray\n        img = mmcv.imread(img_path)\n        res_ndarray = inferencer(img, return_vis=True)\n        self.assert_predictions_equal(res_path['predictions'],\n                                      res_ndarray['predictions'])\n        self.assertIn('visualization', res_path)\n        self.assertIn('visualization', res_ndarray)\n\n        # multiple images\n        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']\n        res_path = inferencer(img_paths, return_vis=True)\n        # list of ndarray\n        imgs = [mmcv.imread(p) for p in img_paths]\n        res_ndarray = inferencer(imgs, return_vis=True)\n        self.assert_predictions_equal(res_path['predictions'],\n                                      res_ndarray['predictions'])\n        self.assertIn('visualization', res_path)\n        self.assertIn('visualization', res_ndarray)\n\n        # img dir, test different batch sizes\n        img_dir = 'tests/data/VOCdevkit/VOC2007/JPEGImages/'\n        res_bs1 = inferencer(img_dir, batch_size=1, return_vis=True)\n        res_bs3 = inferencer(img_dir, batch_size=3, return_vis=True)\n        self.assert_predictions_equal(res_bs1['predictions'],\n                                      res_bs3['predictions'])\n\n        # There is 
a jitter operation when the mask is drawn,\n        # so it cannot be asserted.\n        if model == 'rtmdet-t':\n            for res_bs1_vis, res_bs3_vis in zip(res_bs1['visualization'],\n                                                res_bs3['visualization']):\n                self.assertTrue(np.allclose(res_bs1_vis, res_bs3_vis))\n\n    @parameterized.expand([\n        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'\n    ])\n    def test_visualize(self, model):\n        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']\n\n        mock_load = Mock(return_value=None)\n        with patch('mmengine.infer.infer._load_checkpoint', mock_load):\n            inferencer = DetInferencer(model)\n\n        # In the case of not loading the pretrained weight, the category\n        # defaults to COCO 80, so it needs to be replaced.\n        if model == 'panoptic_fpn_r50_fpn_1x_coco':\n            inferencer.visualizer.dataset_meta = {\n                'classes': get_classes('coco_panoptic'),\n                'palette': 'random'\n            }\n\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            inferencer(img_paths, out_dir=tmp_dir)\n            for img_dir in ['color.jpg', 'gray.jpg']:\n                self.assertTrue(osp.exists(osp.join(tmp_dir, 'vis', img_dir)))\n\n    @parameterized.expand([\n        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'\n    ])\n    def test_postprocess(self, model):\n        # return_datasample\n        img_path = 'tests/data/color.jpg'\n\n        mock_load = Mock(return_value=None)\n        with patch('mmengine.infer.infer._load_checkpoint', mock_load):\n            inferencer = DetInferencer(model)\n\n        # In the case of not loading the pretrained weight, the category\n        # defaults to COCO 80, so it needs to be replaced.\n        if model == 'panoptic_fpn_r50_fpn_1x_coco':\n            inferencer.visualizer.dataset_meta = {\n                'classes': get_classes('coco_panoptic'),\n                'palette': 'random'\n            }\n\n        res = inferencer(img_path, return_datasample=True)\n        self.assertTrue(is_list_of(res['predictions'], DetDataSample))\n\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            res = inferencer(img_path, out_dir=tmp_dir, no_save_pred=False)\n            dumped_res = mmengine.load(\n                osp.join(tmp_dir, 'preds', 'color.json'))\n            self.assertEqual(res['predictions'][0], dumped_res)\n\n    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)\n    def test_pred2dict(self, mock):\n        data_sample = DetDataSample()\n        data_sample.pred_instances = InstanceData()\n\n        data_sample.pred_instances.bboxes = np.array([[0, 0, 1, 1]])\n        data_sample.pred_instances.labels = np.array([0])\n        data_sample.pred_instances.scores = torch.FloatTensor([0.9])\n        res = DetInferencer('rtmdet-t').pred2dict(data_sample)\n        self.assertListAlmostEqual(res['bboxes'], [[0, 0, 1, 1]])\n        self.assertListAlmostEqual(res['labels'], [0])\n        self.assertListAlmostEqual(res['scores'], [0.9])\n\n    def assertListAlmostEqual(self, list1, list2, places=7):\n        for i in range(len(list1)):\n            if isinstance(list1[i], list):\n                self.assertListAlmostEqual(list1[i], list2[i], places=places)\n            else:\n                self.assertAlmostEqual(list1[i], list2[i], places=places)\n"
  },
  {
    "path": "tests/test_apis/test_inference.py",
    "content": "import os\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.structures import DetDataSample\nfrom mmdet.utils import register_all_modules\n\n# TODO: Waiting to fix multiple call error bug\nregister_all_modules()\n\n\n@pytest.mark.parametrize('config,devices',\n                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',\n                           ('cpu', 'cuda'))])\ndef test_init_detector(config, devices):\n    assert all([device in ['cpu', 'cuda'] for device in devices])\n\n    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n    project_dir = os.path.join(project_dir, '..')\n\n    config_file = os.path.join(project_dir, config)\n\n    # test init_detector with config_file: str and cfg_options\n    cfg_options = dict(\n        model=dict(\n            backbone=dict(\n                depth=18,\n                init_cfg=dict(\n                    type='Pretrained', checkpoint='torchvision://resnet18'))))\n\n    for device in devices:\n        if device == 'cuda' and not torch.cuda.is_available():\n            pytest.skip('test requires GPU and torch+cuda')\n\n        model = init_detector(\n            config_file, device=device, cfg_options=cfg_options)\n\n        # test init_detector with :obj:`Path`\n        config_path_object = Path(config_file)\n        model = init_detector(config_path_object, device=device)\n\n        # test init_detector with undesirable type\n        with pytest.raises(TypeError):\n            config_list = [config_file]\n            model = init_detector(config_list)  # noqa: F841\n\n\n@pytest.mark.parametrize('config,devices',\n                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',\n                           ('cpu', 'cuda'))])\ndef test_inference_detector(config, devices):\n    assert all([device in ['cpu', 'cuda'] for device in devices])\n\n    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n    project_dir = os.path.join(project_dir, '..')\n\n    config_file = os.path.join(project_dir, config)\n\n    # test init_detector with config_file: str and cfg_options\n    rng = np.random.RandomState(0)\n    img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)\n    img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)\n\n    for device in devices:\n        if device == 'cuda' and not torch.cuda.is_available():\n            pytest.skip('test requires GPU and torch+cuda')\n\n        model = init_detector(config_file, device=device)\n        result = inference_detector(model, img1)\n        assert isinstance(result, DetDataSample)\n        result = inference_detector(model, [img1, img2])\n        assert isinstance(result, list) and len(result) == 2\n"
  },
  {
    "path": "tests/test_datasets/test_cityscapes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport unittest\n\nfrom mmengine.fileio import dump\n\nfrom mmdet.datasets import CityscapesDataset\n\n\nclass TestCityscapesDataset(unittest.TestCase):\n\n    def setUp(self) -> None:\n        image1 = {\n            'file_name': 'munster/munster_000102_000019_leftImg8bit.png',\n            'height': 1024,\n            'width': 2048,\n            'segm_file': 'munster/munster_000102_000019_gtFine_labelIds.png',\n            'id': 0\n        }\n        image2 = {\n            'file_name': 'munster/munster_000157_000019_leftImg8bit.png',\n            'height': 1024,\n            'width': 2048,\n            'segm_file': 'munster/munster_000157_000019_gtFine_labelIds.png',\n            'id': 1\n        }\n        image3 = {\n            'file_name': 'munster/munster_000139_000019_leftImg8bit.png',\n            'height': 1024,\n            'width': 2048,\n            'segm_file': 'munster/munster_000139_000019_gtFine_labelIds.png',\n            'id': 2\n        }\n        image4 = {\n            'file_name': 'munster/munster_000034_000019_leftImg8bit.png',\n            'height': 31,\n            'width': 15,\n            'segm_file': 'munster/munster_000034_000019_gtFine_labelIds.png',\n            'id': 3\n        }\n\n        images = [image1, image2, image3, image4]\n\n        categories = [{\n            'id': 24,\n            'name': 'person'\n        }, {\n            'id': 25,\n            'name': 'rider'\n        }, {\n            'id': 26,\n            'name': 'car'\n        }]\n\n        annotations = [\n            {\n                'iscrowd': 0,\n                'category_id': 24,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 0,\n                'id': 0\n            },\n            {\n                'iscrowd': 0,\n                'category_id': 25,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': -1,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 0,\n                'id': 1\n            },\n            {\n                'iscrowd': 0,\n                'category_id': 26,\n                'bbox': [379.0, 435.0, -1, 124.0],\n                'area': 2,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 0,\n                'id': 2\n            },\n            {\n                'iscrowd': 0,\n                'category_id': 24,\n                'bbox': [379.0, 435.0, 52.0, -1],\n                'area': 2,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 0,\n                'id': 3\n            },\n            {\n                'iscrowd': 0,\n                'category_id': 1,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 0,\n                'id': 4\n            },\n            {\n                'iscrowd': 1,\n                'category_id': 
26,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 1,\n                'id': 5\n            },\n            {\n                'iscrowd': 0,\n                'category_id': 26,\n                'bbox': [379.0, 435.0, 10, 2],\n                'area': 2595,\n                'segmentation': {\n                    'size': [1024, 2048],\n                    'counts': 'xxx'\n                },\n                'image_id': 3,\n                'id': 6\n            },\n        ]\n        fake_json = {\n            'images': images,\n            'annotations': annotations,\n            'categories': categories\n        }\n        self.json_name = 'cityscapes.json'\n        dump(fake_json, self.json_name)\n\n        self.metainfo = dict(classes=('person', 'rider', 'car'))\n\n    def tearDown(self):\n        os.remove(self.json_name)\n\n    def test_cityscapes_dataset(self):\n        dataset = CityscapesDataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 1)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        dataset = CityscapesDataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n    def test_cityscapes_dataset_without_filter_cfg(self):\n        dataset = CityscapesDataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            filter_cfg=None,\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        dataset = CityscapesDataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=None,\n            pipeline=[])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n"
  },
  {
    "path": "tests/test_datasets/test_coco.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nfrom mmdet.datasets import CocoDataset\n\n\nclass TestCocoDataset(unittest.TestCase):\n\n    def test_coco_dataset(self):\n        # test CocoDataset\n        metainfo = dict(classes=('bus', 'car'), task_name='new_task')\n        dataset = CocoDataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/coco_sample.json',\n            metainfo=metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[],\n            serialize_data=False,\n            lazy_init=False)\n        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))\n        self.assertEqual(dataset.metainfo['task_name'], 'new_task')\n        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])\n\n    def test_coco_dataset_without_filter_cfg(self):\n        # test CocoDataset without filter_cfg\n        dataset = CocoDataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/coco_sample.json',\n            pipeline=[])\n        self.assertEqual(len(dataset), 4)\n\n        # test with test_mode = True\n        dataset = CocoDataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/coco_sample.json',\n            test_mode=True,\n            pipeline=[])\n        self.assertEqual(len(dataset), 4)\n\n    def test_coco_annotation_ids_unique(self):\n        # test annotation ids not unique error\n        metainfo = dict(classes=('car', ), task_name='new_task')\n        with self.assertRaisesRegex(AssertionError, 'are not unique!'):\n            CocoDataset(\n                data_prefix=dict(img='imgs'),\n                ann_file='tests/data/coco_wrong_format_sample.json',\n                metainfo=metainfo,\n                pipeline=[])\n"
  },
  {
    "path": "tests/test_datasets/test_coco_api_wrapper.py",
    "content": "import os.path as osp\nimport tempfile\nimport unittest\n\nfrom mmengine.fileio import dump\n\nfrom mmdet.datasets.api_wrappers import COCOPanoptic\n\n\nclass TestCOCOPanoptic(unittest.TestCase):\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    def test_create_index(self):\n        ann_json = {'test': ['test', 'createIndex']}\n        annotation_file = osp.join(self.tmp_dir.name, 'createIndex.json')\n        dump(ann_json, annotation_file)\n        COCOPanoptic(annotation_file)\n\n    def test_load_anns(self):\n        categories = [{\n            'id': 0,\n            'name': 'person',\n            'supercategory': 'person',\n            'isthing': 1\n        }]\n\n        images = [{\n            'id': 0,\n            'width': 80,\n            'height': 60,\n            'file_name': 'fake_name1.jpg',\n        }]\n\n        annotations = [{\n            'segments_info': [\n                {\n                    'id': 1,\n                    'category_id': 0,\n                    'area': 400,\n                    'bbox': [10, 10, 10, 40],\n                    'iscrowd': 0\n                },\n            ],\n            'file_name':\n            'fake_name1.png',\n            'image_id':\n            0\n        }]\n\n        ann_json = {\n            'images': images,\n            'annotations': annotations,\n            'categories': categories,\n        }\n\n        annotation_file = osp.join(self.tmp_dir.name, 'load_anns.json')\n        dump(ann_json, annotation_file)\n\n        api = COCOPanoptic(annotation_file)\n        api.load_anns(1)\n\n        self.assertIsNone(api.load_anns(0.1))\n"
  },
  {
    "path": "tests/test_datasets/test_coco_panoptic.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport unittest\n\nfrom mmengine.fileio import dump\n\nfrom mmdet.datasets import CocoPanopticDataset\n\n\nclass TestCocoPanopticDataset(unittest.TestCase):\n\n    def setUp(self):\n        image1 = {\n            'id': 0,\n            'width': 640,\n            'height': 640,\n            'file_name': 'fake_name1.jpg',\n        }\n\n        image2 = {\n            'id': 1,\n            'width': 640,\n            'height': 800,\n            'file_name': 'fake_name2.jpg',\n        }\n\n        image3 = {\n            'id': 2,\n            'width': 31,\n            'height': 40,\n            'file_name': 'fake_name3.jpg',\n        }\n\n        image4 = {\n            'id': 3,\n            'width': 400,\n            'height': 400,\n            'file_name': 'fake_name4.jpg',\n        }\n        images = [image1, image2, image3, image4]\n\n        annotations = [\n            {\n                'segments_info': [{\n                    'id': 1,\n                    'category_id': 0,\n                    'area': 400,\n                    'bbox': [50, 60, 20, 20],\n                    'iscrowd': 0\n                }, {\n                    'id': 2,\n                    'category_id': 1,\n                    'area': 900,\n                    'bbox': [100, 120, 30, 30],\n                    'iscrowd': 0\n                }, {\n                    'id': 3,\n                    'category_id': 2,\n                    'iscrowd': 0,\n                    'bbox': [1, 189, 612, 285],\n                    'area': 70036\n                }],\n                'file_name':\n                'fake_name1.jpg',\n                'image_id':\n                0\n            },\n            {\n                'segments_info': [\n                    {\n                        # Different to instance style json, there\n                        # are duplicate ids in panoptic style json\n                        'id': 1,\n                        'category_id': 0,\n                        'area': 400,\n                        'bbox': [50, 60, 20, 20],\n                        'iscrowd': 0\n                    },\n                    {\n                        'id': 4,\n                        'category_id': 1,\n                        'area': 900,\n                        'bbox': [100, 120, 30, 30],\n                        'iscrowd': 1\n                    },\n                    {\n                        'id': 5,\n                        'category_id': 2,\n                        'iscrowd': 0,\n                        'bbox': [100, 200, 200, 300],\n                        'area': 66666\n                    },\n                    {\n                        'id': 6,\n                        'category_id': 0,\n                        'iscrowd': 0,\n                        'bbox': [1, 189, -10, 285],\n                        'area': -2\n                    },\n                    {\n                        'id': 10,\n                        'category_id': 0,\n                        'iscrowd': 0,\n                        'bbox': [1, 189, 10, -285],\n                        'area': 100\n                    }\n                ],\n                'file_name':\n                'fake_name2.jpg',\n                'image_id':\n                1\n            },\n            {\n                'segments_info': [{\n                    'id': 7,\n                    'category_id': 0,\n                    'area': 25,\n                    'bbox': [0, 0, 5, 5],\n     
               'iscrowd': 0\n                }],\n                'file_name':\n                'fake_name3.jpg',\n                'image_id':\n                2\n            },\n            {\n                'segments_info': [{\n                    'id': 8,\n                    'category_id': 0,\n                    'area': 25,\n                    'bbox': [0, 0, 400, 400],\n                    'iscrowd': 1\n                }],\n                'file_name':\n                'fake_name4.jpg',\n                'image_id':\n                3\n            }\n        ]\n\n        categories = [{\n            'id': 0,\n            'name': 'car',\n            'supercategory': 'car',\n            'isthing': 1\n        }, {\n            'id': 1,\n            'name': 'person',\n            'supercategory': 'person',\n            'isthing': 1\n        }, {\n            'id': 2,\n            'name': 'wall',\n            'supercategory': 'wall',\n            'isthing': 0\n        }]\n\n        fake_json = {\n            'images': images,\n            'annotations': annotations,\n            'categories': categories\n        }\n        self.json_name = 'coco_panoptic.json'\n        dump(fake_json, self.json_name)\n\n        self.metainfo = dict(\n            classes=('person', 'car', 'wall'),\n            thing_classes=('person', 'car'),\n            stuff_classes=('wall', ))\n\n    def tearDown(self):\n        os.remove(self.json_name)\n\n    def test_coco_panoptic_dataset(self):\n        dataset = CocoPanopticDataset(\n            data_root='./',\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs', seg='seg'),\n            metainfo=self.metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        self.assertEqual(dataset.metainfo['thing_classes'],\n                         self.metainfo['thing_classes'])\n        self.assertEqual(dataset.metainfo['stuff_classes'],\n                         self.metainfo['stuff_classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 2)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n        # test mode\n        dataset = CocoPanopticDataset(\n            data_root='./',\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs', seg='seg'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        self.assertEqual(dataset.metainfo['thing_classes'],\n                         self.metainfo['thing_classes'])\n        self.assertEqual(dataset.metainfo['stuff_classes'],\n                         self.metainfo['stuff_classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n    def test_coco_panoptic_dataset_without_filter_cfg(self):\n        dataset = CocoPanopticDataset(\n            data_root='./',\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs', seg='seg'),\n            metainfo=self.metainfo,\n            filter_cfg=None,\n            pipeline=[])\n        
self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        self.assertEqual(dataset.metainfo['thing_classes'],\n                         self.metainfo['thing_classes'])\n        self.assertEqual(dataset.metainfo['stuff_classes'],\n                         self.metainfo['stuff_classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        # test mode\n        dataset = CocoPanopticDataset(\n            data_root='./',\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs', seg='seg'),\n            metainfo=self.metainfo,\n            filter_cfg=None,\n            test_mode=True,\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        self.assertEqual(dataset.metainfo['thing_classes'],\n                         self.metainfo['thing_classes'])\n        self.assertEqual(dataset.metainfo['stuff_classes'],\n                         self.metainfo['stuff_classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n"
  },
  {
    "path": "tests/test_datasets/test_crowdhuman.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nfrom mmdet.datasets import CrowdHumanDataset\n\n\nclass TestCrowdHumanDataset(unittest.TestCase):\n\n    def test_crowdhuman_init(self):\n        dataset = CrowdHumanDataset(\n            data_root='tests/data/crowdhuman_dataset/',\n            ann_file='test_annotation_train.odgt',\n            data_prefix=dict(img='Images/'),\n            pipeline=[])\n        self.assertEqual(len(dataset), 1)\n        self.assertEqual(dataset.metainfo['classes'], ('person', ))\n"
  },
  {
    "path": "tests/test_datasets/test_lvis.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport unittest\n\nfrom mmengine.fileio import dump\n\nfrom mmdet.datasets import LVISV1Dataset, LVISV05Dataset\n\ntry:\n    import lvis\nexcept ImportError:\n    lvis = None\n\n\nclass TestLVISDataset(unittest.TestCase):\n\n    def setUp(self) -> None:\n\n        image1 = {\n            # ``coco_url`` for v1 only.\n            'coco_url': 'http://images.cocodataset.org/train2017/0.jpg',\n            # ``file_name`` for v0.5 only.\n            'file_name': '0.jpg',\n            'height': 1024,\n            'width': 2048,\n            'neg_category_ids': [],\n            'not_exhaustive_category_ids': [],\n            'id': 0\n        }\n        image2 = {\n            'coco_url': 'http://images.cocodataset.org/train2017/1.jpg',\n            'file_name': '1.jpg',\n            'height': 1024,\n            'width': 2048,\n            'neg_category_ids': [],\n            'not_exhaustive_category_ids': [],\n            'id': 1\n        }\n        image3 = {\n            'coco_url': 'http://images.cocodataset.org/train2017/2.jpg',\n            'file_name': '2.jpg',\n            'height': 1024,\n            'width': 2048,\n            'neg_category_ids': [],\n            'not_exhaustive_category_ids': [],\n            'id': 2\n        }\n        image4 = {\n            'coco_url': 'http://images.cocodataset.org/train2017/3.jpg',\n            'file_name': '3.jpg',\n            'height': 31,\n            'width': 15,\n            'neg_category_ids': [],\n            'not_exhaustive_category_ids': [],\n            'id': 3\n        }\n\n        images = [image1, image2, image3, image4]\n\n        categories = [{\n            'id': 1,\n            'name': 'aerosol_can',\n            'frequency': 'c',\n            'image_count': 64\n        }, {\n            'id': 2,\n            'name': 'air_conditioner',\n            'frequency': 'f',\n            'image_count': 364\n        }, {\n            'id': 3,\n            'name': 'airplane',\n            'frequency': 'f',\n            'image_count': 1911\n        }]\n\n        annotations = [\n            {\n                'category_id': 1,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 0,\n                'id': 0\n            },\n            {\n                'category_id': 2,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': -1,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 0,\n                'id': 1\n            },\n            {\n                'category_id': 3,\n                'bbox': [379.0, 435.0, -1, 124.0],\n                'area': 2,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 0,\n                'id': 2\n            },\n            {\n                'category_id': 1,\n                'bbox': [379.0, 435.0, 52.0, -1],\n                'area': 2,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 0,\n                'id': 3\n            },\n            {\n                'category_id': 1,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 0,\n                'id': 4\n            },\n            {\n                'category_id': 3,\n                'bbox': [379.0, 435.0, 52.0, 124.0],\n                'area': 2595,\n                
'segmentation': [[0.0, 0.0]],\n                'image_id': 1,\n                'id': 5\n            },\n            {\n                'category_id': 3,\n                'bbox': [379.0, 435.0, 10, 2],\n                'area': 2595,\n                'segmentation': [[0.0, 0.0]],\n                'image_id': 3,\n                'id': 6\n            },\n        ]\n        fake_json = {\n            'images': images,\n            'annotations': annotations,\n            'categories': categories\n        }\n        self.json_name = 'lvis.json'\n        dump(fake_json, self.json_name)\n\n        self.metainfo = dict(\n            classes=('aerosol_can', 'air_conditioner', 'airplane'))\n\n    def tearDown(self):\n        os.remove(self.json_name)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_lvis05_dataset(self):\n        dataset = LVISV05Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 2)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        dataset = LVISV05Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_lvis1_dataset(self):\n        dataset = LVISV1Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 2)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        dataset = LVISV1Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_lvis1_dataset_without_filter_cfg(self):\n        dataset = LVISV1Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            filter_cfg=None,\n            pipeline=[])\n        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal 
annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n\n        dataset = LVISV1Dataset(\n            ann_file=self.json_name,\n            data_prefix=dict(img='imgs'),\n            metainfo=self.metainfo,\n            test_mode=True,\n            filter_cfg=None,\n            pipeline=[])\n        dataset.full_init()\n        # filter images of small size and images\n        # with all illegal annotations\n        self.assertEqual(len(dataset), 4)\n        self.assertEqual(len(dataset.load_data_list()), 4)\n"
  },
  {
    "path": "tests/test_datasets/test_objects365.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nfrom mmdet.datasets import Objects365V1Dataset, Objects365V2Dataset\n\n\nclass TestObjects365V1Dataset(unittest.TestCase):\n\n    def test_obj365v1_dataset(self):\n        # test Objects365V1Dataset\n        metainfo = dict(classes=('bus', 'car'), task_name='new_task')\n        dataset = Objects365V1Dataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/coco_sample.json',\n            metainfo=metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[],\n            serialize_data=False,\n            lazy_init=False)\n        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))\n        self.assertEqual(dataset.metainfo['task_name'], 'new_task')\n        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])\n        self.assertEqual(dataset.cat_ids, [1, 2])\n\n    def test_obj365v1_with_unsorted_annotation(self):\n        # test Objects365V1Dataset with unsorted annotations\n        metainfo = dict(classes=('bus', 'car'), task_name='new_task')\n        dataset = Objects365V1Dataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/Objects365/unsorted_obj365_sample.json',\n            metainfo=metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[],\n            serialize_data=False,\n            lazy_init=False)\n        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))\n        self.assertEqual(dataset.metainfo['task_name'], 'new_task')\n        # sort the unsorted annotations\n        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])\n        self.assertEqual(dataset.cat_ids, [1, 2])\n\n    def test_obj365v1_annotation_ids_unique(self):\n        # test annotation ids not unique error\n        metainfo = dict(classes=('car', ), task_name='new_task')\n        with self.assertRaisesRegex(AssertionError, 'are not unique!'):\n            Objects365V1Dataset(\n                data_prefix=dict(img='imgs'),\n                ann_file='tests/data/coco_wrong_format_sample.json',\n                metainfo=metainfo,\n                pipeline=[])\n\n\nclass TestObjects365V2Dataset(unittest.TestCase):\n\n    def test_obj365v2_dataset(self):\n        # test Objects365V2Dataset\n        metainfo = dict(classes=('bus', 'car'), task_name='new_task')\n        dataset = Objects365V2Dataset(\n            data_prefix=dict(img='imgs'),\n            ann_file='tests/data/coco_sample.json',\n            metainfo=metainfo,\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[],\n            serialize_data=False,\n            lazy_init=False)\n        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))\n        self.assertEqual(dataset.metainfo['task_name'], 'new_task')\n        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])\n        self.assertEqual(dataset.cat_ids, [1, 2])\n\n    def test_obj365v1_annotation_ids_unique(self):\n        # test annotation ids not unique error\n        metainfo = dict(classes=('car', ), task_name='new_task')\n        with self.assertRaisesRegex(AssertionError, 'are not unique!'):\n            Objects365V2Dataset(\n                data_prefix=dict(img='imgs'),\n                ann_file='tests/data/coco_wrong_format_sample.json',\n                metainfo=metainfo,\n                pipeline=[])\n"
  },
  {
    "path": "tests/test_datasets/test_openimages.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nfrom mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset\n\n\nclass TestOpenImagesDataset(unittest.TestCase):\n\n    def test_init(self):\n        dataset = OpenImagesDataset(\n            data_root='tests/data/OpenImages/',\n            ann_file='annotations/oidv6-train-annotations-bbox.csv',\n            data_prefix=dict(img='OpenImages/train/'),\n            label_file='annotations/class-descriptions-boxable.csv',\n            hierarchy_file='annotations/bbox_labels_600_hierarchy.json',\n            meta_file='annotations/image-metas.pkl',\n            pipeline=[])\n        dataset.full_init()\n        self.assertEqual(len(dataset), 1)\n        self.assertEqual(dataset.metainfo['classes'], ['Airplane'])\n\n\nclass TestOpenImagesChallengeDataset(unittest.TestCase):\n\n    def test_init(self):\n        dataset = OpenImagesChallengeDataset(\n            data_root='tests/data/OpenImages/',\n            ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',\n            data_prefix=dict(img='OpenImages/train/'),\n            label_file='challenge2019/cls-label-description.csv',\n            hierarchy_file='challenge2019/class_label_tree.np',\n            meta_file='annotations/image-metas.pkl',\n            pipeline=[])\n        dataset.full_init()\n        self.assertEqual(len(dataset), 1)\n        self.assertEqual(dataset.metainfo['classes'], ['Airplane'])\n"
  },
  {
    "path": "tests/test_datasets/test_pascal_voc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nfrom mmdet.datasets import VOCDataset\n\n\nclass TestVOCDataset(unittest.TestCase):\n\n    def test_voc2007_init(self):\n        dataset = VOCDataset(\n            data_root='tests/data/VOCdevkit/',\n            ann_file='VOC2007/ImageSets/Main/trainval.txt',\n            data_prefix=dict(sub_data_root='VOC2007/'),\n            filter_cfg=dict(\n                filter_empty_gt=True, min_size=32, bbox_min_size=32),\n            pipeline=[])\n        dataset.full_init()\n        self.assertEqual(len(dataset), 1)\n\n        data_list = dataset.load_data_list()\n        self.assertEqual(len(data_list), 1)\n        self.assertEqual(len(data_list[0]['instances']), 2)\n        self.assertEqual(dataset.get_cat_ids(0), [11, 14])\n\n    def test_voc2012_init(self):\n        dataset = VOCDataset(\n            data_root='tests/data/VOCdevkit/',\n            ann_file='VOC2012/ImageSets/Main/trainval.txt',\n            data_prefix=dict(sub_data_root='VOC2012/'),\n            filter_cfg=dict(filter_empty_gt=True, min_size=32),\n            pipeline=[])\n        dataset.full_init()\n        self.assertEqual(len(dataset), 1)\n\n        data_list = dataset.load_data_list()\n        self.assertEqual(len(data_list), 1)\n        self.assertEqual(len(data_list[0]['instances']), 1)\n        self.assertEqual(dataset.get_cat_ids(0), [18])\n"
  },
  {
    "path": "tests/test_datasets/test_samplers/test_batch_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom mmengine.dataset import DefaultSampler\nfrom torch.utils.data import Dataset\n\nfrom mmdet.datasets.samplers import AspectRatioBatchSampler\n\n\nclass DummyDataset(Dataset):\n\n    def __init__(self, length):\n        self.length = length\n        self.shapes = np.random.random((length, 2))\n\n    def __len__(self):\n        return self.length\n\n    def __getitem__(self, idx):\n        return self.shapes[idx]\n\n    def get_data_info(self, idx):\n        return dict(width=self.shapes[idx][0], height=self.shapes[idx][1])\n\n\nclass TestAspectRatioBatchSampler(TestCase):\n\n    @patch('mmengine.dist.get_dist_info', return_value=(0, 1))\n    def setUp(self, mock):\n        self.length = 100\n        self.dataset = DummyDataset(self.length)\n        self.sampler = DefaultSampler(self.dataset, shuffle=False)\n\n    def test_invalid_inputs(self):\n        with self.assertRaisesRegex(\n                ValueError, 'batch_size should be a positive integer value'):\n            AspectRatioBatchSampler(self.sampler, batch_size=-1)\n\n        with self.assertRaisesRegex(\n                TypeError, 'sampler should be an instance of ``Sampler``'):\n            AspectRatioBatchSampler(None, batch_size=1)\n\n    def test_divisible_batch(self):\n        batch_size = 5\n        batch_sampler = AspectRatioBatchSampler(\n            self.sampler, batch_size=batch_size, drop_last=True)\n        self.assertEqual(len(batch_sampler), self.length // batch_size)\n        for batch_idxs in batch_sampler:\n            self.assertEqual(len(batch_idxs), batch_size)\n            batch = [self.dataset[idx] for idx in batch_idxs]\n            flag = batch[0][0] < batch[0][1]\n            for i in range(1, batch_size):\n                self.assertEqual(batch[i][0] < batch[i][1], flag)\n\n    def test_indivisible_batch(self):\n        batch_size = 7\n        batch_sampler = AspectRatioBatchSampler(\n            self.sampler, batch_size=batch_size, drop_last=False)\n        all_batch_idxs = list(batch_sampler)\n        self.assertEqual(\n            len(batch_sampler), (self.length + batch_size - 1) // batch_size)\n        self.assertEqual(\n            len(all_batch_idxs), (self.length + batch_size - 1) // batch_size)\n\n        batch_sampler = AspectRatioBatchSampler(\n            self.sampler, batch_size=batch_size, drop_last=True)\n        all_batch_idxs = list(batch_sampler)\n        self.assertEqual(len(batch_sampler), self.length // batch_size)\n        self.assertEqual(len(all_batch_idxs), self.length // batch_size)\n\n        # the last batch may not have the same aspect ratio\n        for batch_idxs in all_batch_idxs[:-1]:\n            self.assertEqual(len(batch_idxs), batch_size)\n            batch = [self.dataset[idx] for idx in batch_idxs]\n            flag = batch[0][0] < batch[0][1]\n            for i in range(1, batch_size):\n                self.assertEqual(batch[i][0] < batch[i][1], flag)\n"
  },
  {
    "path": "tests/test_datasets/test_samplers/test_multi_source_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\nimport bisect\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom torch.utils.data import ConcatDataset, Dataset\n\nfrom mmdet.datasets.samplers import GroupMultiSourceSampler, MultiSourceSampler\n\n\nclass DummyDataset(Dataset):\n\n    def __init__(self, length, flag):\n        self.length = length\n        self.flag = flag\n        self.shapes = np.random.random((length, 2))\n\n    def __len__(self):\n        return self.length\n\n    def __getitem__(self, idx):\n        return self.shapes[idx]\n\n    def get_data_info(self, idx):\n        return dict(\n            width=self.shapes[idx][0],\n            height=self.shapes[idx][1],\n            flag=self.flag)\n\n\nclass DummyConcatDataset(ConcatDataset):\n\n    def _get_ori_dataset_idx(self, idx):\n        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)\n        sample_idx = idx if dataset_idx == 0 else idx - self.cumulative_sizes[\n            dataset_idx - 1]\n        return dataset_idx, sample_idx\n\n    def get_data_info(self, idx: int):\n        dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)\n        return self.datasets[dataset_idx].get_data_info(sample_idx)\n\n\nclass TestMultiSourceSampler(TestCase):\n\n    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))\n    def setUp(self, mock):\n        self.length_a = 100\n        self.dataset_a = DummyDataset(self.length_a, flag='a')\n        self.length_b = 1000\n        self.dataset_b = DummyDataset(self.length_b, flag='b')\n        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])\n\n    def test_multi_source_sampler(self):\n        # test dataset is not ConcatDataset\n        with self.assertRaises(AssertionError):\n            MultiSourceSampler(\n                self.dataset_a, batch_size=5, source_ratio=[1, 4])\n        # test invalid batch_size\n        with self.assertRaises(AssertionError):\n            MultiSourceSampler(\n                self.dataset_a, batch_size=-5, source_ratio=[1, 4])\n        # test source_ratio longer then dataset\n        with self.assertRaises(AssertionError):\n            MultiSourceSampler(\n                self.dataset, batch_size=5, source_ratio=[1, 2, 4])\n        sampler = MultiSourceSampler(\n            self.dataset, batch_size=5, source_ratio=[1, 4])\n        sampler = iter(sampler)\n        flags = []\n        for i in range(100):\n            idx = next(sampler)\n            flags.append(self.dataset.get_data_info(idx)['flag'])\n        flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20\n        self.assertEqual(flags, flags_gt)\n\n\nclass TestGroupMultiSourceSampler(TestCase):\n\n    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))\n    def setUp(self, mock):\n        self.length_a = 100\n        self.dataset_a = DummyDataset(self.length_a, flag='a')\n        self.length_b = 1000\n        self.dataset_b = DummyDataset(self.length_b, flag='b')\n        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])\n\n    def test_group_multi_source_sampler(self):\n        sampler = GroupMultiSourceSampler(\n            self.dataset, batch_size=5, source_ratio=[1, 4])\n        sampler = iter(sampler)\n        flags = []\n        groups = []\n        for i in range(100):\n            idx = next(sampler)\n            data_info = self.dataset.get_data_info(idx)\n            flags.append(data_info['flag'])\n            group = 0 if data_info['width'] < data_info['height'] else 1\n     
       groups.append(group)\n        flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20\n        self.assertEqual(flags, flags_gt)\n        groups = set(\n            [sum(x) for x in (groups[k:k + 5] for k in range(0, 100, 5))])\n        groups_gt = set([0, 5])\n        self.assertEqual(groups, groups_gt)\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .utils import construct_toy_data, create_full_masks, create_random_bboxes\n\n__all__ = ['create_random_bboxes', 'create_full_masks', 'construct_toy_data']\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_augment_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport unittest\n\nfrom mmdet.datasets.transforms import (AutoAugment, AutoContrast, Brightness,\n                                       Color, Contrast, Equalize, Invert,\n                                       Posterize, RandAugment, Rotate,\n                                       Sharpness, ShearX, ShearY, Solarize,\n                                       SolarizeAdd, TranslateX, TranslateY)\nfrom mmdet.utils import register_all_modules\nfrom .utils import check_result_same, construct_toy_data\n\nregister_all_modules()\n\n\nclass TestAutoAugment(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map',\n                           'homography_matrix')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.img_fill_val = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_autoaugment(self):\n        # test AutoAugment equipped with Shear\n        policies = [[\n            dict(type='ShearX', prob=1.0, level=3, reversal_prob=0.0),\n            dict(type='ShearY', prob=1.0, level=7, reversal_prob=1.0)\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_shearx = ShearX(prob=1.0, level=3, reversal_prob=0.0)\n        transform_sheary = ShearY(prob=1.0, level=7, reversal_prob=1.0)\n        results_sheared = transform_sheary(\n            transform_shearx(copy.deepcopy(self.results_mask)))\n        check_result_same(results_sheared, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Rotate\n        policies = [[\n            dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_rotate = Rotate(prob=1.0, level=10, reversal_prob=0.0)\n        results_rotated = transform_rotate(copy.deepcopy(self.results_mask))\n        check_result_same(results_rotated, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Translate\n        policies = [[\n            dict(\n                type='TranslateX',\n                prob=1.0,\n                level=10,\n                max_mag=1.0,\n                reversal_prob=0.0),\n            dict(\n                type='TranslateY',\n                prob=1.0,\n                level=10,\n                max_mag=1.0,\n                reversal_prob=1.0)\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_translatex = TranslateX(\n            prob=1.0, level=10, max_mag=1.0, reversal_prob=0.0)\n        transform_translatey = TranslateY(\n            prob=1.0, level=10, max_mag=1.0, reversal_prob=1.0)\n        results_translated = transform_translatey(\n            transform_translatex(copy.deepcopy(self.results_mask)))\n        check_result_same(results_translated, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Brightness\n        policies = [[\n            
dict(type='Brightness', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_brightness = Brightness(prob=1.0, level=3)\n        results_brightness = transform_brightness(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_brightness, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Color\n        policies = [[\n            dict(type='Color', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_color = Color(prob=1.0, level=3)\n        results_colored = transform_color(copy.deepcopy(self.results_mask))\n        check_result_same(results_colored, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Contrast\n        policies = [[\n            dict(type='Contrast', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_contrast = Contrast(prob=1.0, level=3)\n        results_contrasted = transform_contrast(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_contrasted, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Sharpness\n        policies = [[\n            dict(type='Sharpness', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_sharpness = Sharpness(prob=1.0, level=3)\n        results_sharpness = transform_sharpness(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_sharpness, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Solarize\n        policies = [[\n            dict(type='Solarize', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_solarize = Solarize(prob=1.0, level=3)\n        results_solarized = transform_solarize(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_solarized, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with SolarizeAdd\n        policies = [[\n            dict(type='SolarizeAdd', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_solarizeadd = SolarizeAdd(prob=1.0, level=3)\n        results_solarizeadded = transform_solarizeadd(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_solarizeadded, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Posterize\n        policies = [[\n            dict(type='Posterize', prob=1.0, level=3),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_posterize = Posterize(prob=1.0, level=3)\n        results_posterized = transform_posterize(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_posterized, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Equalize\n        policies = [[\n            
dict(type='Equalize', prob=1.0),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_equalize = Equalize(prob=1.0)\n        results_equalized = transform_equalize(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_equalized, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with AutoContrast\n        policies = [[\n            dict(type='AutoContrast', prob=1.0),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_autocontrast = AutoContrast(prob=1.0)\n        results_autocontrast = transform_autocontrast(\n            copy.deepcopy(self.results_mask))\n        check_result_same(results_autocontrast, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with Invert\n        policies = [[\n            dict(type='Invert', prob=1.0),\n        ]]\n        transform_auto = AutoAugment(policies=policies)\n        results_auto = transform_auto(copy.deepcopy(self.results_mask))\n        transform_invert = Invert(prob=1.0)\n        results_inverted = transform_invert(copy.deepcopy(self.results_mask))\n        check_result_same(results_inverted, results_auto, self.check_keys)\n\n        # test AutoAugment equipped with default policies\n        transform_auto = AutoAugment()\n        transform_auto(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        policies = [[\n            dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0),\n            dict(type='Invert', prob=1.0),\n        ]]\n        transform = AutoAugment(policies=policies)\n        self.assertEqual(\n            repr(transform), ('AutoAugment('\n                              'policies=[['\n                              \"{'type': 'Rotate', 'prob': 1.0, \"\n                              \"'level': 10, 'reversal_prob': 0.0}, \"\n                              \"{'type': 'Invert', 'prob': 1.0}]], \"\n                              'prob=None)'))\n\n\nclass TestRandAugment(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map',\n                           'homography_matrix')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.img_fill_val = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_randaugment(self):\n        # test RandAugment equipped with Rotate\n        aug_space = [[\n            dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0)\n        ]]\n        transform_rand = RandAugment(aug_space=aug_space, aug_num=1)\n        results_rand = transform_rand(copy.deepcopy(self.results_mask))\n        transform_rotate = Rotate(prob=1.0, level=10, reversal_prob=0.0)\n        results_rotated = transform_rotate(copy.deepcopy(self.results_mask))\n        check_result_same(results_rotated, results_rand, self.check_keys)\n\n        # test RandAugment equipped with default augmentation space\n        transform_rand = RandAugment()\n        transform_rand(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        aug_space = [\n            
[dict(type='Rotate')],\n            [dict(type='Invert')],\n        ]\n        transform = RandAugment(aug_space=aug_space)\n        self.assertEqual(\n            repr(transform), ('RandAugment('\n                              'aug_space=['\n                              \"[{'type': 'Rotate'}], \"\n                              \"[{'type': 'Invert'}]], \"\n                              'aug_num=2, '\n                              'prob=None)'))\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_colorspace.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport unittest\n\nfrom mmdet.datasets.transforms import (AutoContrast, Brightness, Color,\n                                       ColorTransform, Contrast, Equalize,\n                                       Invert, Posterize, Sharpness, Solarize,\n                                       SolarizeAdd)\nfrom .utils import check_result_same, construct_toy_data\n\n\nclass TestColorTransform(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_colortransform(self):\n        # test assertion for invalid value of level\n        with self.assertRaises(AssertionError):\n            transform = ColorTransform(level=-1)\n\n        # test assertion for invalid prob\n        with self.assertRaises(AssertionError):\n            transform = ColorTransform(level=1, prob=-0.5)\n\n        # test case when no translation is called (prob=0)\n        transform = ColorTransform(prob=0.0, level=10)\n        results_wo_color = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_color, self.check_keys)\n\n    def test_repr(self):\n        transform = ColorTransform(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('ColorTransform(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestColor(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_color(self):\n        # test case when level=5 (without color aug)\n        transform = Color(prob=1.0, level=5)\n        results_wo_color = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_color, self.check_keys)\n        # test case when level=0\n        transform = Color(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n        # test case when level=10\n        transform = Color(prob=1.0, level=10)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Color(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('Color(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestBrightness(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           
'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_brightness(self):\n        # test case when level=5 (without Brightness aug)\n        transform = Brightness(level=5, prob=1.0)\n        results_wo_brightness = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_brightness,\n                          self.check_keys)\n        # test case when level=0\n        transform = Brightness(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n        # test case when level=10\n        transform = Brightness(prob=1.0, level=10)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Brightness(prob=1.0, level=10)\n        self.assertEqual(\n            repr(transform), ('Brightness(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestContrast(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_contrast(self):\n        # test case when level=5 (without Contrast aug)\n        transform = Contrast(prob=1.0, level=5)\n        results_wo_contrast = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_contrast,\n                          self.check_keys)\n        # test case when level=0\n        transform = Contrast(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n        # test case when level=10\n        transform = Contrast(prob=1.0, level=10)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Contrast(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('Contrast(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestSharpness(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_sharpness(self):\n        # test case when level=5 (without Sharpness aug)\n        transform = Sharpness(prob=1.0, level=5)\n        results_wo_sharpness = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_sharpness,\n                          self.check_keys)\n        # test case when level=0\n        transform = Sharpness(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n        # test case when level=10\n        transform = Sharpness(prob=1.0, level=10)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Sharpness(level=10, prob=1.)\n  
      self.assertEqual(\n            repr(transform), ('Sharpness(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestSolarize(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_solarize(self):\n        # test case when level=10 (without Solarize aug)\n        transform = Solarize(prob=1.0, level=10)\n        results_wo_solarize = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_solarize,\n                          self.check_keys)\n        # test case when level=0\n        transform = Solarize(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Solarize(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('Solarize(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.0, '\n                              'max_mag=256.0)'))\n\n\nclass TestSolarizeAdd(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_solarize(self):\n        # test case when level=0 (without Solarize aug)\n        transform = SolarizeAdd(prob=1.0, level=0)\n        results_wo_solarizeadd = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_solarizeadd,\n                          self.check_keys)\n        # test case when level=10\n        transform = SolarizeAdd(prob=1.0, level=10)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = SolarizeAdd(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('SolarizeAdd(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.0, '\n                              'max_mag=110.0)'))\n\n\nclass TestPosterize(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_posterize(self):\n        # test case when level=10 (without Posterize aug)\n        transform = Posterize(prob=1.0, level=10, max_mag=8.0)\n        results_wo_posterize = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_posterize,\n                          self.check_keys)\n     
   # test case when level=0\n        transform = Posterize(prob=1.0, level=0)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Posterize(level=10, prob=1.)\n        self.assertEqual(\n            repr(transform), ('Posterize(prob=1.0, '\n                              'level=10, '\n                              'min_mag=0.0, '\n                              'max_mag=4.0)'))\n\n\nclass TestEqualize(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_equalize(self):\n        # test case when no translation is called (prob=0)\n        transform = Equalize(prob=0.0)\n        results_wo_equalize = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_equalize,\n                          self.check_keys)\n        # test case when translation is called\n        transform = Equalize(prob=1.0)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Equalize(prob=1.0)\n        self.assertEqual(\n            repr(transform), ('Equalize(prob=1.0, '\n                              'level=None, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestAutoContrast(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_autocontrast(self):\n        # test case when no translation is called (prob=0)\n        transform = AutoContrast(prob=0.0)\n        results_wo_autocontrast = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_autocontrast,\n                          self.check_keys)\n        # test case when translation is called\n        transform = AutoContrast(prob=1.0)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = AutoContrast(prob=1.0)\n        self.assertEqual(\n            repr(transform), ('AutoContrast(prob=1.0, '\n                              'level=None, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n\n\nclass TestInvert(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n\n    def test_invert(self):\n        # test case when no translation is called (prob=0)\n        transform = Invert(prob=0.0)\n        
results_wo_invert = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_invert,\n                          self.check_keys)\n        # test case when translation is called\n        transform = Invert(prob=1.0)\n        transform(copy.deepcopy(self.results_mask))\n\n    def test_repr(self):\n        transform = Invert(prob=1.0)\n        self.assertEqual(\n            repr(transform), ('Invert(prob=1.0, '\n                              'level=None, '\n                              'min_mag=0.1, '\n                              'max_mag=1.9)'))\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_formatting.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nimport unittest\n\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData, PixelData\n\nfrom mmdet.datasets.transforms import PackDetInputs\nfrom mmdet.structures import DetDataSample\nfrom mmdet.structures.mask import BitmapMasks\n\n\nclass TestPackDetInputs(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        data_prefix = osp.join(osp.dirname(__file__), '../../data')\n        img_path = osp.join(data_prefix, 'color.jpg')\n        rng = np.random.RandomState(0)\n        self.results1 = {\n            'img_id': 1,\n            'img_path': img_path,\n            'ori_shape': (300, 400),\n            'img_shape': (600, 800),\n            'scale_factor': 2.0,\n            'flip': False,\n            'img': rng.rand(300, 400),\n            'gt_seg_map': rng.rand(300, 400),\n            'gt_masks':\n            BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),\n            'gt_bboxes_labels': rng.rand(3, ),\n            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),\n            'proposals': rng.rand(2, 4),\n            'proposals_scores': rng.rand(2, )\n        }\n        self.results2 = {\n            'img_id': 1,\n            'img_path': img_path,\n            'ori_shape': (300, 400),\n            'img_shape': (600, 800),\n            'scale_factor': 2.0,\n            'flip': False,\n            'img': rng.rand(300, 400),\n            'gt_seg_map': rng.rand(300, 400),\n            'gt_masks':\n            BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),\n            'gt_bboxes_labels': rng.rand(3, ),\n            'proposals': rng.rand(2, 4),\n            'proposals_scores': rng.rand(2, )\n        }\n        self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',\n                          'flip')\n\n    def test_transform(self):\n        transform = PackDetInputs(meta_keys=self.meta_keys)\n        results = transform(copy.deepcopy(self.results1))\n        self.assertIn('data_samples', results)\n        self.assertIsInstance(results['data_samples'], DetDataSample)\n        self.assertIsInstance(results['data_samples'].gt_instances,\n                              InstanceData)\n        self.assertIsInstance(results['data_samples'].ignored_instances,\n                              InstanceData)\n        self.assertEqual(len(results['data_samples'].gt_instances), 2)\n        self.assertEqual(len(results['data_samples'].ignored_instances), 1)\n        self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)\n        self.assertIsInstance(results['data_samples'].proposals, InstanceData)\n        self.assertEqual(len(results['data_samples'].proposals), 2)\n        self.assertIsInstance(results['data_samples'].proposals.bboxes,\n                              torch.Tensor)\n        self.assertIsInstance(results['data_samples'].proposals.scores,\n                              torch.Tensor)\n\n    def test_transform_without_ignore(self):\n        transform = PackDetInputs(meta_keys=self.meta_keys)\n        results = transform(copy.deepcopy(self.results2))\n        self.assertIn('data_samples', results)\n        self.assertIsInstance(results['data_samples'], DetDataSample)\n        
self.assertIsInstance(results['data_samples'].gt_instances,\n                              InstanceData)\n        self.assertIsInstance(results['data_samples'].ignored_instances,\n                              InstanceData)\n        self.assertEqual(len(results['data_samples'].gt_instances), 3)\n        self.assertEqual(len(results['data_samples'].ignored_instances), 0)\n        self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)\n        self.assertIsInstance(results['data_samples'].proposals, InstanceData)\n        self.assertEqual(len(results['data_samples'].proposals), 2)\n        self.assertIsInstance(results['data_samples'].proposals.bboxes,\n                              torch.Tensor)\n        self.assertIsInstance(results['data_samples'].proposals.scores,\n                              torch.Tensor)\n\n    def test_repr(self):\n        transform = PackDetInputs(meta_keys=self.meta_keys)\n        self.assertEqual(\n            repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_geometric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport unittest\n\nimport numpy as np\n\nfrom mmdet.datasets.transforms import (GeomTransform, Rotate, ShearX, ShearY,\n                                       TranslateX, TranslateY)\nfrom mmdet.structures.bbox import HorizontalBoxes\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\nfrom .utils import check_result_same, construct_toy_data\n\n\nclass TestGeomTransform(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_geomtransform(self):\n        # test assertion for invalid prob\n        with self.assertRaises(AssertionError):\n            transform = GeomTransform(\n                prob=-0.5, level=1, min_mag=0.0, max_mag=1.0)\n\n        # test assertion for invalid value of level\n        with self.assertRaises(AssertionError):\n            transform = GeomTransform(\n                prob=0.5, level=-1, min_mag=0.0, max_mag=1.0)\n\n        # test assertion for invalid value of min_mag and max_mag\n        with self.assertRaises(AssertionError):\n            transform = ShearX(prob=0.5, level=2, min_mag=1.0, max_mag=0.0)\n\n        # test assertion for the num of elements in tuple img_border_value\n        with self.assertRaises(AssertionError):\n            transform = GeomTransform(\n                prob=0.5,\n                level=1,\n                min_mag=0.0,\n                max_mag=1.0,\n                img_border_value=(128, 128, 128, 128))\n\n        # test ValueError for invalid type of img_border_value\n        with self.assertRaises(ValueError):\n            transform = GeomTransform(\n                prob=0.5,\n                level=1,\n                min_mag=0.0,\n                max_mag=1.0,\n                img_border_value=[128, 128, 128])\n\n        # test assertion for invalid value of img_border_value\n        with self.assertRaises(AssertionError):\n            transform = GeomTransform(\n                prob=0.5,\n                level=1,\n                min_mag=0.0,\n                max_mag=1.0,\n                img_border_value=(128, -1, 256))\n\n        # test case when no aug (prob=0)\n        transform = GeomTransform(\n            prob=0.,\n            level=10,\n            min_mag=0.0,\n            max_mag=1.0,\n            img_border_value=self.img_border_value)\n        results_wo_aug = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_aug, self.check_keys)\n\n    def test_repr(self):\n        transform = GeomTransform(\n            prob=0.5,\n            level=5,\n            min_mag=0.0,\n            max_mag=1.0,\n        )\n        self.assertEqual(\n            repr(transform), ('GeomTransform(prob=0.5, '\n                              'level=5, '\n                              'min_mag=0.0, '\n                              'max_mag=1.0, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              
'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n\n\nclass TestShearX(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.results_poly = construct_toy_data(poly2mask=False)\n        self.results_mask_boxtype = construct_toy_data(\n            poly2mask=True, use_box_type=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_shearx(self):\n        # test assertion for invalid value of min_mag\n        with self.assertRaises(AssertionError):\n            transform = ShearX(prob=0.5, level=2, min_mag=-30.)\n        # test assertion for invalid value of max_mag\n        with self.assertRaises(AssertionError):\n            transform = ShearX(prob=0.5, level=2, max_mag=100.)\n\n        # test case when no shear horizontally (level=0)\n        transform = ShearX(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_shearx = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_shearx,\n                          self.check_keys)\n\n        # test shear horizontally, magnitude=-1\n        transform = ShearX(\n            prob=1.0,\n            level=10,\n            max_mag=45.,\n            reversal_prob=1.0,\n            img_border_value=self.img_border_value)\n        results_sheared = transform(copy.deepcopy(self.results_mask))\n        results_gt = copy.deepcopy(self.results_mask)\n        img_gt = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]],\n                          dtype=np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        img_gt[1, 0, :] = np.array(self.img_border_value)\n        img_gt[2, 0, :] = np.array(self.img_border_value)\n        img_gt[2, 1, :] = np.array(self.img_border_value)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[1, 0, 4, 2]], dtype=np.float32)\n        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)\n        gt_masks = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 255, 255], [255, 255, 13, 13], [255, 255, 255, 13]],\n            dtype=self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n        # test PolygonMasks with shear horizontally, magnitude=1\n        results_sheared = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([3, 2, 1, 0, 3, 1], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n    def test_shearx_use_box_type(self):\n        # test case when no shear horizontally (level=0)\n    
    transform = ShearX(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_shearx = transform(copy.deepcopy(self.results_mask_boxtype))\n        check_result_same(self.results_mask_boxtype, results_wo_shearx,\n                          self.check_keys)\n\n        # test shear horizontally, magnitude=-1\n        transform = ShearX(\n            prob=1.0,\n            level=10,\n            max_mag=45.,\n            reversal_prob=1.0,\n            img_border_value=self.img_border_value)\n        results_sheared = transform(copy.deepcopy(self.results_mask_boxtype))\n        results_gt = copy.deepcopy(self.results_mask_boxtype)\n        img_gt = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]],\n                          dtype=np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        img_gt[1, 0, :] = np.array(self.img_border_value)\n        img_gt[2, 0, :] = np.array(self.img_border_value)\n        img_gt[2, 1, :] = np.array(self.img_border_value)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[1, 0, 4, 2]], dtype=np.float32))\n        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)\n        gt_masks = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 255, 255], [255, 255, 13, 13], [255, 255, 255, 13]],\n            dtype=self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n    def test_repr(self):\n        transform = ShearX(prob=0.5, level=10)\n        self.assertEqual(\n            repr(transform), ('ShearX(prob=0.5, '\n                              'level=10, '\n                              'min_mag=0.0, '\n                              'max_mag=30.0, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n\n\nclass TestShearY(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.results_poly = construct_toy_data(poly2mask=False)\n        self.results_mask_boxtype = construct_toy_data(\n            poly2mask=True, use_box_type=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_sheary(self):\n        # test assertion for invalid value of min_mag\n        with self.assertRaises(AssertionError):\n            transform = ShearY(prob=0.5, level=2, min_mag=-30.)\n        # test assertion for invalid value of max_mag\n        with self.assertRaises(AssertionError):\n            transform = ShearY(prob=0.5, level=2, 
max_mag=100.)\n\n        # test case when no shear vertically (level=0)\n        transform = ShearY(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_sheary = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_sheary,\n                          self.check_keys)\n\n        # test shear vertically, magnitude=1\n        transform = ShearY(prob=1., level=10, max_mag=45., reversal_prob=0.)\n        results_sheared = transform(copy.deepcopy(self.results_mask))\n        results_gt = copy.deepcopy(self.results_mask)\n        img_gt = np.array(\n            [[1, 6, 11, 128], [5, 10, 128, 128], [9, 128, 128, 128]],\n            dtype=np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[1, 0, 2, 1]], dtype=np.float32)\n        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)\n        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 255, 255], [255, 13, 255, 255], [255, 255, 255, 255]],\n            dtype=self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n        # test PolygonMasks with shear vertically, magnitude=-1\n        results_sheared = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([1, 1, 1, 0, 2, 0], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n    def test_sheary_use_box_type(self):\n        # test case when no shear vertically (level=0)\n        transform = ShearY(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_sheary = transform(copy.deepcopy(self.results_mask_boxtype))\n        check_result_same(self.results_mask_boxtype, results_wo_sheary,\n                          self.check_keys)\n\n        # test shear vertically, magnitude=1\n        transform = ShearY(prob=1., level=10, max_mag=45., reversal_prob=0.)\n        results_sheared = transform(copy.deepcopy(self.results_mask_boxtype))\n        results_gt = copy.deepcopy(self.results_mask_boxtype)\n        img_gt = np.array(\n            [[1, 6, 11, 128], [5, 10, 128, 128], [9, 128, 128, 128]],\n            dtype=np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[1, 0, 2, 1]], dtype=np.float32))\n        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)\n        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 255, 255], [255, 13, 255, 255], [255, 255, 255, 255]],\n            
dtype=self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_sheared, self.check_keys)\n\n    def test_repr(self):\n        transform = ShearY(prob=0.5, level=10)\n        self.assertEqual(\n            repr(transform), ('ShearY(prob=0.5, '\n                              'level=10, '\n                              'min_mag=0.0, '\n                              'max_mag=30.0, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n\n\nclass TestRotate(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.results_poly = construct_toy_data(poly2mask=False)\n        self.results_mask_boxtype = construct_toy_data(\n            poly2mask=True, use_box_type=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_rotate(self):\n        # test assertion for invalid value of min_mag\n        with self.assertRaises(AssertionError):\n            transform = Rotate(prob=0.5, level=2, min_mag=-90.0)\n        # test assertion for invalid value of max_mag\n        with self.assertRaises(AssertionError):\n            transform = Rotate(prob=0.5, level=2, max_mag=270.0)\n\n        # test case when no rotate aug (level=0)\n        transform = Rotate(\n            prob=1.,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_rotate = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_rotate,\n                          self.check_keys)\n\n        # test clockwise rotation with angle 90\n        transform = Rotate(\n            prob=1.,\n            level=10,\n            max_mag=90.0,\n            # set reversal_prob to 1 for clockwise rotation\n            reversal_prob=1.,\n        )\n        results_rotated = transform(copy.deepcopy(self.results_mask))\n        # The image, masks, and semantic segmentation map\n        # will be bilinearly interpolated.\n        img_gt = np.array([[69, 8, 4, 65], [69, 9, 5, 65],\n                           [70, 10, 6, 66]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[0.5, 0.5, 2.5, 1.5]],\n                                           dtype=np.float32)\n        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 13, 13], [255, 255, 13, 255],\n             [255, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        
check_result_same(results_gt, results_rotated, self.check_keys)\n\n        # test clockwise rotation with angle 90, PolygonMasks\n        results_rotated = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([0, 1, 0, 1, 0, 2], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_rotated, self.check_keys)\n\n        # test counter-clockwise rotation with angle 90\n        transform = Rotate(\n            prob=1.0,\n            level=10,\n            max_mag=90.0,\n            # set reversal_prob to 0 for counter-clockwise rotation\n            reversal_prob=0.0,\n        )\n        results_rotated = transform(copy.deepcopy(self.results_mask))\n        # The image, masks, and semantic segmentation map\n        # will be bilinearly interpolated.\n        img_gt = np.array([[66, 6, 10, 70], [65, 5, 9, 69],\n                           [65, 4, 8, 69]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[0.5, 0.5, 2.5, 1.5]],\n                                           dtype=np.float32)\n        gt_masks = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 255, 255, 255], [255, 13, 255, 255],\n             [13, 13, 13, 255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_rotated, self.check_keys)\n\n        # test counter-clockwise rotation with angle 90, PolygonMasks\n        results_rotated = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([2, 0, 0, 0, 1, 0], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_rotated, self.check_keys)\n\n    def test_rotate_use_box_type(self):\n        # test case when no rotate aug (level=0)\n        transform = Rotate(\n            prob=1.,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label,\n        )\n        results_wo_rotate = transform(copy.deepcopy(self.results_mask_boxtype))\n        check_result_same(self.results_mask_boxtype, results_wo_rotate,\n                          self.check_keys)\n\n        # test clockwise rotation with angle 90\n        transform = Rotate(\n            prob=1.,\n            level=10,\n            max_mag=90.0,\n            # set reversal_prob to 1 for clockwise rotation\n            reversal_prob=1.,\n        )\n        results_rotated = transform(copy.deepcopy(self.results_mask_boxtype))\n        # The image, masks, and semantic segmentation map\n        # will be bilinearly interpolated.\n        img_gt = np.array([[69, 8, 4, 65], [69, 9, 5, 65],\n                           [70, 10, 6, 66]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask_boxtype)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[0.5, 0.5, 2.5, 1.5]], dtype=np.float32))\n        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],\n                            
dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 13, 13], [255, 255, 13, 255],\n             [255, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_rotated, self.check_keys)\n\n        # test counter-clockwise rotation with angle 90\n        transform = Rotate(\n            prob=1.0,\n            level=10,\n            max_mag=90.0,\n            # set reversal_prob to 0 for counter-clockwise rotation\n            reversal_prob=0.0,\n        )\n        results_rotated = transform(copy.deepcopy(self.results_mask_boxtype))\n        # The image, masks, and  semantic segmentation map\n        # will be bilinearly interpolated.\n        img_gt = np.array([[66, 6, 10, 70], [65, 5, 9, 69],\n                           [65, 4, 8, 69]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask_boxtype)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[0.5, 0.5, 2.5, 1.5]], dtype=np.float32))\n        gt_masks = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 255, 255, 255], [255, 13, 255, 255],\n             [13, 13, 13, 255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_rotated, self.check_keys)\n\n    def test_repr(self):\n        transform = Rotate(prob=0.5, level=5)\n        self.assertEqual(\n            repr(transform), ('Rotate(prob=0.5, '\n                              'level=5, '\n                              'min_mag=0.0, '\n                              'max_mag=30.0, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n\n\nclass TestTranslateX(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.results_poly = construct_toy_data(poly2mask=False)\n        self.results_mask_boxtype = construct_toy_data(\n            poly2mask=True, use_box_type=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_translatex(self):\n        # test assertion for invalid value of min_mag\n        with self.assertRaises(AssertionError):\n            transform = TranslateX(prob=0.5, level=2, min_mag=-1.)\n        # test assertion for invalid value of max_mag\n        with self.assertRaises(AssertionError):\n            transform = TranslateX(prob=0.5, level=2, max_mag=1.1)\n\n        # test case when 
level=0 (without translate aug)\n        transform = TranslateX(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_wo_translatex = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_translatex,\n                          self.check_keys)\n\n        # test translate horizontally, magnitude=-1\n        transform = TranslateX(\n            prob=1.0,\n            level=10,\n            max_mag=0.3,\n            reversal_prob=0.0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_translated = transform(copy.deepcopy(self.results_mask))\n        img_gt = np.array([[2, 3, 4, 0], [6, 7, 8, 0], [10, 11, 12,\n                                                        0]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        img_gt[:, 3, :] = np.array(self.img_border_value)\n        results_gt = copy.deepcopy(self.results_mask)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[0, 0, 1, 2]], dtype=np.float32)\n        gt_masks = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[13, 255, 255, 255], [13, 13, 255, 255],\n             [13, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n        # test PolygonMasks with translate horizontally.\n        results_translated = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([0, 2, 0, 0, 1, 1], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n    def test_translatex_use_box_type(self):\n        # test case when level=0 (without translate aug)\n        transform = TranslateX(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_wo_translatex = transform(\n            copy.deepcopy(self.results_mask_boxtype))\n        check_result_same(self.results_mask_boxtype, results_wo_translatex,\n                          self.check_keys)\n\n        # test translate horizontally, magnitude=-1\n        transform = TranslateX(\n            prob=1.0,\n            level=10,\n            max_mag=0.3,\n            reversal_prob=0.0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_translated = transform(\n            copy.deepcopy(self.results_mask_boxtype))\n        img_gt = np.array([[2, 3, 4, 0], [6, 7, 8, 0], [10, 11, 12,\n                                                        0]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        img_gt[:, 3, :] = np.array(self.img_border_value)\n        results_gt = copy.deepcopy(self.results_mask)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[0, 0, 1, 2]], dtype=np.float32))\n        gt_masks = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 0, 0, 0]],\n      
                      dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[13, 255, 255, 255], [13, 13, 255, 255],\n             [13, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n    def test_repr(self):\n        transform = TranslateX(prob=0.5, level=5)\n        self.assertEqual(\n            repr(transform), ('TranslateX(prob=0.5, '\n                              'level=5, '\n                              'min_mag=0.0, '\n                              'max_mag=0.1, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n\n\nclass TestTranslateY(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',\n                           'gt_ignore_flags', 'gt_seg_map')\n        self.results_mask = construct_toy_data(poly2mask=True)\n        self.results_poly = construct_toy_data(poly2mask=False)\n        self.results_mask_boxtype = construct_toy_data(\n            poly2mask=True, use_box_type=True)\n        self.img_border_value = (104, 116, 124)\n        self.seg_ignore_label = 255\n\n    def test_translatey(self):\n        # test assertion for invalid value of min_mag\n        with self.assertRaises(AssertionError):\n            transform = TranslateY(prob=0.5, level=2, min_mag=-1.0)\n        # test assertion for invalid value of max_mag\n        with self.assertRaises(AssertionError):\n            transform = TranslateY(prob=0.5, level=2, max_mag=1.1)\n\n        # test case when level=0 (without translate aug)\n        transform = TranslateY(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_wo_translatey = transform(copy.deepcopy(self.results_mask))\n        check_result_same(self.results_mask, results_wo_translatey,\n                          self.check_keys)\n\n        # test translate vertically, magnitude=-1\n        transform = TranslateY(\n            prob=1.0,\n            level=10,\n            max_mag=0.4,\n            reversal_prob=0.0,\n            seg_ignore_label=self.seg_ignore_label)\n\n        results_translated = transform(copy.deepcopy(self.results_mask))\n        img_gt = np.array([[5, 6, 7, 8], [9, 10, 11, 12],\n                           [128, 128, 128, 128]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = np.array([[1, 0, 2, 1]], dtype=np.float32)\n        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], 
dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 13, 255], [255, 13, 255, 255],\n             [255, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n        # test PolygonMasks with translate vertically.\n        results_translated = transform(copy.deepcopy(self.results_poly))\n        gt_masks = [[np.array([1, 1, 1, 0, 2, 0], dtype=np.float32)]]\n        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n    def test_translatey_use_box_type(self):\n        # test case when level=0 (without translate aug)\n        transform = TranslateY(\n            prob=1.0,\n            level=0,\n            img_border_value=self.img_border_value,\n            seg_ignore_label=self.seg_ignore_label)\n        results_wo_translatey = transform(\n            copy.deepcopy(self.results_mask_boxtype))\n        check_result_same(self.results_mask_boxtype, results_wo_translatey,\n                          self.check_keys)\n\n        # test translate vertically, magnitude=-1\n        transform = TranslateY(\n            prob=1.0,\n            level=10,\n            max_mag=0.4,\n            reversal_prob=0.0,\n            seg_ignore_label=self.seg_ignore_label)\n\n        results_translated = transform(\n            copy.deepcopy(self.results_mask_boxtype))\n        img_gt = np.array([[5, 6, 7, 8], [9, 10, 11, 12],\n                           [128, 128, 128, 128]]).astype(np.uint8)\n        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)\n        results_gt = copy.deepcopy(self.results_mask_boxtype)\n        results_gt['img'] = img_gt\n        results_gt['gt_bboxes'] = HorizontalBoxes(\n            np.array([[1, 0, 2, 1]], dtype=np.float32))\n        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n        results_gt['gt_seg_map'] = np.array(\n            [[255, 13, 13, 255], [255, 13, 255, 255],\n             [255, 255, 255,\n              255]]).astype(self.results_mask['gt_seg_map'].dtype)\n        check_result_same(results_gt, results_translated, self.check_keys)\n\n    def test_repr(self):\n        transform = TranslateY(prob=0.5, level=5)\n        self.assertEqual(\n            repr(transform), ('TranslateY(prob=0.5, '\n                              'level=5, '\n                              'min_mag=0.0, '\n                              'max_mag=0.1, '\n                              'reversal_prob=0.5, '\n                              'img_border_value=(128.0, 128.0, 128.0), '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255, '\n                              'interpolation=bilinear)'))\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_instaboost.py",
    "content": "import os.path as osp\nimport unittest\n\nimport numpy as np\n\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\nclass TestInstaboost(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')\n        self.results = {\n            'img_path':\n            img_path,\n            'img_shape': (300, 400),\n            'instances': [{\n                'bbox': [0, 0, 10, 20],\n                'bbox_label': 1,\n                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [10, 10, 110, 120],\n                'bbox_label': 2,\n                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [50, 50, 60, 80],\n                'bbox_label': 2,\n                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n                'ignore_flag': 1\n            }]\n        }\n\n    def test_transform(self):\n        load = TRANSFORMS.build(dict(type='LoadImageFromFile'))\n        instaboost_transform = TRANSFORMS.build(dict(type='InstaBoost'))\n\n        # Execute transforms\n        results = load(self.results)\n        results = instaboost_transform(results)\n\n        self.assertEqual(results['img'].dtype, np.uint8)\n        self.assertIn('instances', results)\n\n    def test_repr(self):\n        instaboost_transform = TRANSFORMS.build(dict(type='InstaBoost'))\n\n        self.assertEqual(\n            repr(instaboost_transform), 'InstaBoost(aug_ratio=0.5)')\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_loading.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os\nimport os.path as osp\nimport sys\nimport unittest\nfrom unittest.mock import MagicMock, Mock, patch\n\nimport mmcv\nimport numpy as np\n\nfrom mmdet.datasets.transforms import (FilterAnnotations, LoadAnnotations,\n                                       LoadEmptyAnnotations,\n                                       LoadImageFromNDArray,\n                                       LoadMultiChannelImageFromFiles,\n                                       LoadProposals)\nfrom mmdet.evaluation import INSTANCE_OFFSET\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\ntry:\n    import panopticapi\nexcept ImportError:\n    panopticapi = None\n\n\nclass TestLoadAnnotations(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        data_prefix = osp.join(osp.dirname(__file__), '../../data')\n        seg_map = osp.join(data_prefix, 'gray.jpg')\n        self.results = {\n            'ori_shape': (300, 400),\n            'seg_map_path':\n            seg_map,\n            'instances': [{\n                'bbox': [0, 0, 10, 20],\n                'bbox_label': 1,\n                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [10, 10, 110, 120],\n                'bbox_label': 2,\n                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [50, 50, 60, 80],\n                'bbox_label': 2,\n                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n                'ignore_flag': 1\n            }]\n        }\n\n    def test_load_bboxes(self):\n        transform = LoadAnnotations(\n            with_bbox=True,\n            with_label=False,\n            with_seg=False,\n            with_mask=False,\n            box_type=None)\n        results = transform(copy.deepcopy(self.results))\n        self.assertIn('gt_bboxes', results)\n        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 10, 20],\n                                                           [10, 10, 110, 120],\n                                                           [50, 50, 60,\n                                                            80]])).all())\n        self.assertEqual(results['gt_bboxes'].dtype, np.float32)\n        self.assertTrue((results['gt_ignore_flags'] == np.array([0, 0,\n                                                                 1])).all())\n        self.assertEqual(results['gt_ignore_flags'].dtype, bool)\n\n    def test_load_labels(self):\n        transform = LoadAnnotations(\n            with_bbox=False,\n            with_label=True,\n            with_seg=False,\n            with_mask=False,\n        )\n        results = transform(copy.deepcopy(self.results))\n        self.assertIn('gt_bboxes_labels', results)\n        self.assertTrue((results['gt_bboxes_labels'] == np.array([1, 2,\n                                                                  2])).all())\n        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)\n\n    def test_load_mask(self):\n        transform = LoadAnnotations(\n            with_bbox=False,\n            with_label=False,\n            with_seg=False,\n            with_mask=True,\n            poly2mask=False)\n        results = 
transform(copy.deepcopy(self.results))\n        self.assertIn('gt_masks', results)\n        self.assertEqual(len(results['gt_masks']), 3)\n        self.assertIsInstance(results['gt_masks'], PolygonMasks)\n\n    def test_load_mask_poly2mask(self):\n        transform = LoadAnnotations(\n            with_bbox=False,\n            with_label=False,\n            with_seg=False,\n            with_mask=True,\n            poly2mask=True)\n        results = transform(copy.deepcopy(self.results))\n        self.assertIn('gt_masks', results)\n        self.assertEqual(len(results['gt_masks']), 3)\n        self.assertIsInstance(results['gt_masks'], BitmapMasks)\n\n    def test_repr(self):\n        transform = LoadAnnotations(\n            with_bbox=True,\n            with_label=False,\n            with_seg=False,\n            with_mask=False,\n        )\n        self.assertEqual(\n            repr(transform), ('LoadAnnotations(with_bbox=True, '\n                              'with_label=False, with_mask=False, '\n                              'with_seg=False, poly2mask=True, '\n                              \"imdecode_backend='cv2', \"\n                              'file_client_args=None)'))\n\n\nclass TestFilterAnnotations(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.results = {\n            'img':\n            np.random.random((224, 224, 3)),\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]]),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=np.bool8),\n            'gt_masks':\n            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),\n        }\n\n    def test_transform(self):\n        # test keep_empty = True\n        transform = FilterAnnotations(\n            min_gt_bbox_wh=(50, 50),\n            keep_empty=True,\n        )\n        results = transform(copy.deepcopy(self.results))\n        self.assertIsNone(results)\n\n        # test keep_empty = False\n        transform = FilterAnnotations(\n            min_gt_bbox_wh=(50, 50),\n            keep_empty=False,\n        )\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(isinstance(results, dict))\n\n        # test filter annotations\n        transform = FilterAnnotations(min_gt_bbox_wh=(15, 15), )\n        results = transform(copy.deepcopy(self.results))\n\n        self.assertIsInstance(results, dict)\n        self.assertTrue((results['gt_bboxes_labels'] == np.array([2,\n                                                                  3])).all())\n        self.assertTrue((results['gt_bboxes'] == np.array([[20, 20, 40, 40],\n                                                           [40, 40, 80,\n                                                            80]])).all())\n        self.assertEqual(len(results['gt_masks']), 2)\n        self.assertEqual(len(results['gt_ignore_flags']), 2)\n\n    def test_repr(self):\n        transform = FilterAnnotations(\n            min_gt_bbox_wh=(1, 1),\n            keep_empty=False,\n        )\n        self.assertEqual(\n            repr(transform), ('FilterAnnotations(min_gt_bbox_wh=(1, 1), '\n                           
   'keep_empty=False)'))\n\n\nclass TestLoadPanopticAnnotations(unittest.TestCase):\n\n    def setUp(self):\n        seg_map = np.zeros((10, 10), dtype=np.int32)\n        seg_map[:5, :10] = 1 + 10 * INSTANCE_OFFSET\n        seg_map[5:10, :5] = 4 + 11 * INSTANCE_OFFSET\n        seg_map[5:10, 5:10] = 6 + 0 * INSTANCE_OFFSET\n        rgb_seg_map = np.zeros((10, 10, 3), dtype=np.uint8)\n        rgb_seg_map[:, :, 0] = seg_map / (256 * 256)\n        rgb_seg_map[:, :, 1] = seg_map % (256 * 256) / 256\n        rgb_seg_map[:, :, 2] = seg_map % 256\n        self.seg_map_path = './1.png'\n        mmcv.imwrite(rgb_seg_map, self.seg_map_path)\n\n        self.seg_map = seg_map\n        self.rgb_seg_map = rgb_seg_map\n        self.results = {\n            'ori_shape': (10, 10),\n            'instances': [{\n                'bbox': [0, 0, 10, 5],\n                'bbox_label': 0,\n                'ignore_flag': 0,\n            }, {\n                'bbox': [0, 5, 5, 10],\n                'bbox_label': 1,\n                'ignore_flag': 1,\n            }],\n            'segments_info': [\n                {\n                    'id': 1 + 10 * INSTANCE_OFFSET,\n                    'category': 0,\n                    'is_thing': True,\n                },\n                {\n                    'id': 4 + 11 * INSTANCE_OFFSET,\n                    'category': 1,\n                    'is_thing': True,\n                },\n                {\n                    'id': 6 + 0 * INSTANCE_OFFSET,\n                    'category': 2,\n                    'is_thing': False,\n                },\n            ],\n            'seg_map_path':\n            self.seg_map_path\n        }\n\n        self.gt_mask = BitmapMasks([\n            (seg_map == 1 + 10 * INSTANCE_OFFSET).astype(np.uint8),\n            (seg_map == 4 + 11 * INSTANCE_OFFSET).astype(np.uint8),\n        ], 10, 10)\n        self.gt_bboxes = np.array([[0, 0, 10, 5], [0, 5, 5, 10]],\n                                  dtype=np.float32)\n        self.gt_bboxes_labels = np.array([0, 1], dtype=np.int64)\n        self.gt_ignore_flags = np.array([0, 1], dtype=bool)\n        self.gt_seg_map = np.zeros((10, 10), dtype=np.int32)\n        self.gt_seg_map[:5, :10] = 0\n        self.gt_seg_map[5:10, :5] = 1\n        self.gt_seg_map[5:10, 5:10] = 2\n\n    def tearDown(self):\n        os.remove(self.seg_map_path)\n\n    @unittest.skipIf(panopticapi is not None, 'panopticapi is installed')\n    def test_init_without_panopticapi(self):\n        # test if panopticapi is not installed\n        from mmdet.datasets.transforms import LoadPanopticAnnotations\n        with self.assertRaisesRegex(\n                ImportError,\n                'panopticapi is not installed, please install it by'):\n            LoadPanopticAnnotations()\n\n    def test_transform(self):\n        sys.modules['panopticapi'] = MagicMock()\n        sys.modules['panopticapi.utils'] = MagicMock()\n        from mmdet.datasets.transforms import LoadPanopticAnnotations\n        mock_rgb2id = Mock(return_value=self.seg_map)\n        with patch('panopticapi.utils.rgb2id', mock_rgb2id):\n            # test with all False\n            transform = LoadPanopticAnnotations(\n                with_bbox=False,\n                with_label=False,\n                with_mask=False,\n                with_seg=False)\n            results = transform(copy.deepcopy(self.results))\n            self.assertDictEqual(results, self.results)\n            # test with with_mask=True\n            transform = LoadPanopticAnnotations(\n          
      with_bbox=False,\n                with_label=False,\n                with_mask=True,\n                with_seg=False)\n            results = transform(copy.deepcopy(self.results))\n            self.assertTrue(\n                (results['gt_masks'].masks == self.gt_mask.masks).all())\n\n            # test with with_seg=True\n            transform = LoadPanopticAnnotations(\n                with_bbox=False,\n                with_label=False,\n                with_mask=False,\n                with_seg=True)\n            results = transform(copy.deepcopy(self.results))\n            self.assertNotIn('gt_masks', results)\n            self.assertTrue((results['gt_seg_map'] == self.gt_seg_map).all())\n\n            # test with all True\n            transform = LoadPanopticAnnotations(\n                with_bbox=True,\n                with_label=True,\n                with_mask=True,\n                with_seg=True,\n                box_type=None)\n            results = transform(copy.deepcopy(self.results))\n            self.assertTrue(\n                (results['gt_masks'].masks == self.gt_mask.masks).all())\n            self.assertTrue((results['gt_bboxes'] == self.gt_bboxes).all())\n            self.assertTrue(\n                (results['gt_bboxes_labels'] == self.gt_bboxes_labels).all())\n            self.assertTrue(\n                (results['gt_ignore_flags'] == self.gt_ignore_flags).all())\n            self.assertTrue((results['gt_seg_map'] == self.gt_seg_map).all())\n\n\nclass TestLoadImageFromNDArray(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.results = {'img': np.zeros((256, 256, 3), dtype=np.uint8)}\n\n    def test_transform(self):\n        transform = LoadImageFromNDArray()\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape, (256, 256, 3))\n        self.assertEqual(results['img'].dtype, np.uint8)\n        self.assertEqual(results['img_shape'], (256, 256))\n        self.assertEqual(results['ori_shape'], (256, 256))\n\n        # to_float32\n        transform = LoadImageFromNDArray(to_float32=True)\n        results = transform(copy.deepcopy(results))\n        self.assertEqual(results['img'].dtype, np.float32)\n\n    def test_repr(self):\n        transform = LoadImageFromNDArray()\n        self.assertEqual(\n            repr(transform), ('LoadImageFromNDArray('\n                              'ignore_empty=False, '\n                              'to_float32=False, '\n                              \"color_type='color', \"\n                              \"imdecode_backend='cv2', \"\n                              'backend_args=None)'))\n\n\nclass TestLoadMultiChannelImageFromFiles(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.img_path = []\n        for i in range(4):\n            img_channel_path = f'./part_{i}.jpg'\n            img_channel = np.zeros((10, 10), dtype=np.uint8)\n            mmcv.imwrite(img_channel, img_channel_path)\n            self.img_path.append(img_channel_path)\n        self.results = {'img_path': self.img_path}\n\n    def tearDown(self):\n        for filename in 
self.img_path:\n            os.remove(filename)\n\n    def test_transform(self):\n        transform = LoadMultiChannelImageFromFiles()\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape, (10, 10, 4))\n        self.assertEqual(results['img'].dtype, np.uint8)\n        self.assertEqual(results['img_shape'], (10, 10))\n        self.assertEqual(results['ori_shape'], (10, 10))\n\n        # to_float32\n        transform = LoadMultiChannelImageFromFiles(to_float32=True)\n        results = transform(copy.deepcopy(results))\n        self.assertEqual(results['img'].dtype, np.float32)\n\n    def test_repr(self):\n        transform = LoadMultiChannelImageFromFiles()\n        self.assertEqual(\n            repr(transform), ('LoadMultiChannelImageFromFiles('\n                              'to_float32=False, '\n                              \"color_type='unchanged', \"\n                              \"imdecode_backend='cv2', \"\n                              \"file_client_args={'backend': 'disk'})\"))\n\n\nclass TestLoadProposals(unittest.TestCase):\n\n    def test_transform(self):\n        transform = LoadProposals()\n        results = {\n            'proposals':\n            dict(\n                bboxes=np.zeros((5, 4), dtype=np.int64),\n                scores=np.zeros((5, ), dtype=np.int64))\n        }\n        results = transform(results)\n        self.assertEqual(results['proposals'].dtype, np.float32)\n        self.assertEqual(results['proposals'].shape[-1], 4)\n        self.assertEqual(results['proposals_scores'].dtype, np.float32)\n\n        #  bboxes.shape[1] should be 4\n        results = {'proposals': dict(bboxes=np.zeros((5, 5), dtype=np.int64))}\n        with self.assertRaises(AssertionError):\n            transform(results)\n\n        # bboxes.shape[0] should equal to scores.shape[0]\n        results = {\n            'proposals':\n            dict(\n                bboxes=np.zeros((5, 4), dtype=np.int64),\n                scores=np.zeros((3, ), dtype=np.int64))\n        }\n        with self.assertRaises(AssertionError):\n            transform(results)\n\n        # empty bboxes\n        results = {\n            'proposals': dict(bboxes=np.zeros((0, 4), dtype=np.float32))\n        }\n        results = transform(results)\n        excepted_proposals = np.zeros((0, 4), dtype=np.float32)\n        excepted_proposals_scores = np.zeros(0, dtype=np.float32)\n        self.assertTrue((results['proposals'] == excepted_proposals).all())\n        self.assertTrue(\n            (results['proposals_scores'] == excepted_proposals_scores).all())\n\n        transform = LoadProposals(num_max_proposals=2)\n        results = {\n            'proposals':\n            dict(\n                bboxes=np.zeros((5, 4), dtype=np.int64),\n                scores=np.zeros((5, ), dtype=np.int64))\n        }\n        results = transform(results)\n        self.assertEqual(results['proposals'].shape[0], 2)\n\n    def test_repr(self):\n        transform = LoadProposals()\n        self.assertEqual(\n            repr(transform), 'LoadProposals(num_max_proposals=None)')\n\n\nclass TestLoadEmptyAnnotations(unittest.TestCase):\n\n    def test_transform(self):\n        transform = LoadEmptyAnnotations(\n            with_bbox=True, with_label=True, with_mask=True, with_seg=True)\n        results = {'img_shape': (224, 224)}\n        results = transform(results)\n        self.assertEqual(results['gt_bboxes'].dtype, np.float32)\n        self.assertEqual(results['gt_bboxes'].shape[-1], 
4)\n        self.assertEqual(results['gt_ignore_flags'].dtype, bool)\n        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)\n        self.assertEqual(results['gt_masks'].masks.dtype, np.uint8)\n        self.assertEqual(results['gt_masks'].masks.shape[-2:],\n                         results['img_shape'])\n        self.assertEqual(results['gt_seg_map'].dtype, np.uint8)\n        self.assertEqual(results['gt_seg_map'].shape, results['img_shape'])\n\n    def test_repr(self):\n        transform = LoadEmptyAnnotations()\n        self.assertEqual(\n            repr(transform), 'LoadEmptyAnnotations(with_bbox=True, '\n            'with_label=True, '\n            'with_mask=False, '\n            'with_seg=False, '\n            'seg_ignore_label=255)')\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_transforms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nimport unittest\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.transforms import LoadImageFromFile\n\n# yapf:disable\nfrom mmdet.datasets.transforms import (CopyPaste, CutOut, Expand,\n                                       FixShapeResize, MinIoURandomCrop, MixUp,\n                                       Mosaic, Pad, PhotoMetricDistortion,\n                                       RandomAffine, RandomCenterCropPad,\n                                       RandomCrop, RandomErasing, RandomFlip,\n                                       RandomShift, Resize, SegRescale,\n                                       YOLOXHSVRandomAug)\n# yapf:enable\nfrom mmdet.evaluation import bbox_overlaps\nfrom mmdet.registry import TRANSFORMS\nfrom mmdet.structures.bbox import HorizontalBoxes, bbox_project\nfrom mmdet.structures.mask import BitmapMasks\nfrom .utils import construct_toy_data, create_full_masks, create_random_bboxes\n\ntry:\n    import albumentations\n    from albumentations import Compose\nexcept ImportError:\n    albumentations = None\n    Compose = None\n# yapf:enable\n\n\nclass TestResize(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod()\n        -> tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.data_info1 = dict(\n            img=np.random.random((1333, 800, 3)),\n            gt_seg_map=np.random.random((1333, 800, 3)),\n            gt_bboxes=np.array([[0, 0, 112, 112]], dtype=np.float32),\n            gt_masks=BitmapMasks(\n                rng.rand(1, 1333, 800), height=1333, width=800))\n        self.data_info2 = dict(\n            img=np.random.random((300, 400, 3)),\n            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),\n            dtype=np.float32)\n        self.data_info3 = dict(img=np.random.random((300, 400, 3)))\n\n    def test_resize(self):\n        # test keep_ratio is True\n        transform = Resize(scale=(2000, 2000), keep_ratio=True)\n        results = transform(copy.deepcopy(self.data_info1))\n        self.assertEqual(results['img_shape'], (2000, 1200))\n        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))\n\n        # test resize_bboxes/seg/masks\n        transform = Resize(scale_factor=(1.5, 2))\n        results = transform(copy.deepcopy(self.data_info1))\n        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 168,\n                                                            224]])).all())\n        self.assertEqual(results['gt_masks'].height, 2666)\n        self.assertEqual(results['gt_masks'].width, 1200)\n        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))\n\n        # test clip_object_border = False\n        transform = Resize(scale=(200, 150), clip_object_border=False)\n        results = transform(self.data_info2)\n        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,\n                                                           225])).all())\n\n        # test only with image\n        transform = Resize(scale=(200, 150), clip_object_border=False)\n        results = transform(self.data_info3)\n        self.assertTupleEqual(results['img'].shape[:2], (150, 200))\n\n        # test geometric transformation with homography matrix\n        transform = Resize(scale_factor=(1.5, 2))\n    
    results = transform(copy.deepcopy(self.data_info1))\n        self.assertTrue((bbox_project(\n            copy.deepcopy(self.data_info1['gt_bboxes']),\n            results['homography_matrix']) == results['gt_bboxes']).all())\n\n    def test_resize_use_box_type(self):\n        data_info1 = copy.deepcopy(self.data_info1)\n        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])\n        data_info2 = copy.deepcopy(self.data_info2)\n        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])\n        # test keep_ratio is True\n        transform = Resize(scale=(2000, 2000), keep_ratio=True)\n        results = transform(copy.deepcopy(data_info1))\n        self.assertEqual(results['img_shape'], (2000, 1200))\n        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))\n\n        # test resize_bboxes/seg/masks\n        transform = Resize(scale_factor=(1.5, 2))\n        results = transform(copy.deepcopy(data_info1))\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == np.array([[0, 0, 168,\n                                                        224]])).all())\n        self.assertEqual(results['gt_masks'].height, 2666)\n        self.assertEqual(results['gt_masks'].width, 1200)\n        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))\n\n        # test clip_object_border = False\n        transform = Resize(scale=(200, 150), clip_object_border=False)\n        results = transform(data_info2)\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,\n                                                       225])).all())\n\n        # test geometric transformation with homography matrix\n        transform = Resize(scale_factor=(1.5, 2))\n        results = transform(copy.deepcopy(data_info1))\n        self.assertTrue((bbox_project(\n            copy.deepcopy(data_info1['gt_bboxes'].numpy()),\n            results['homography_matrix']) == results['gt_bboxes'].numpy()\n                         ).all())\n\n    def test_repr(self):\n        transform = Resize(scale=(2000, 2000), keep_ratio=True)\n        self.assertEqual(\n            repr(transform), ('Resize(scale=(2000, 2000), '\n                              'scale_factor=None, keep_ratio=True, '\n                              'clip_object_border=True), backend=cv2), '\n                              'interpolation=bilinear)'))\n\n\nclass TestFIXShapeResize(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.data_info1 = dict(\n            img=np.random.random((1333, 800, 3)),\n            gt_seg_map=np.random.random((1333, 800, 3)),\n            gt_bboxes=np.array([[0, 0, 112, 1333]], dtype=np.float32),\n            gt_masks=BitmapMasks(\n                rng.rand(1, 1333, 800), height=1333, width=800))\n        self.data_info2 = dict(\n            img=np.random.random((300, 400, 3)),\n            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),\n            dtype=np.float32)\n        self.data_info3 = dict(img=np.random.random((300, 400, 3)))\n        self.data_info4 = dict(\n            img=np.random.random((600, 800, 3)),\n            gt_bboxes=np.array([[200, 150, 300, 400]], dtype=np.float32),\n            dtype=np.float32)\n\n    def test_resize(self):\n        # 
test keep_ratio is True\n        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)\n        results = transform(copy.deepcopy(self.data_info1))\n        self.assertEqual(results['img_shape'], (800, 2000))\n        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))\n        # test resize_bboxes/seg/masks\n        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)\n        results = transform(copy.deepcopy(self.data_info1))\n        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 280,\n                                                            800]])).all())\n        self.assertEqual(results['gt_masks'].height, 800)\n        self.assertEqual(results['gt_masks'].width, 2000)\n        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))\n\n        # test clip_object_border = False\n        transform = FixShapeResize(\n            width=200, height=150, clip_object_border=False)\n        results = transform(copy.deepcopy(self.data_info2))\n        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,\n                                                           225])).all())\n\n        # test only with image\n        transform = FixShapeResize(\n            width=200, height=150, clip_object_border=False)\n        results = transform(self.data_info3)\n        self.assertTupleEqual(results['img'].shape[:2], (150, 200))\n\n        # test geometric transformation with homography matrix\n        transform = FixShapeResize(width=400, height=300)\n        results = transform(copy.deepcopy(self.data_info4))\n        self.assertTrue((bbox_project(\n            copy.deepcopy(self.data_info4['gt_bboxes']),\n            results['homography_matrix']) == results['gt_bboxes']).all())\n\n    def test_resize_with_boxlist(self):\n        data_info1 = copy.deepcopy(self.data_info1)\n        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])\n        data_info2 = copy.deepcopy(self.data_info2)\n        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])\n        data_info4 = copy.deepcopy(self.data_info4)\n        data_info4['gt_bboxes'] = HorizontalBoxes(data_info4['gt_bboxes'])\n        # test keep_ratio is True\n        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)\n        results = transform(copy.deepcopy(data_info1))\n        self.assertEqual(results['img_shape'], (800, 2000))\n        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))\n\n        # test resize_bboxes/seg/masks\n        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)\n        results = transform(copy.deepcopy(data_info1))\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == np.array([[0, 0, 280,\n                                                        800]])).all())\n        self.assertEqual(results['gt_masks'].height, 800)\n        self.assertEqual(results['gt_masks'].width, 2000)\n        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))\n\n        # test clip_object_border = False\n        transform = FixShapeResize(\n            width=200, height=150, clip_object_border=False)\n        results = transform(copy.deepcopy(data_info2))\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,\n                                                       225])).all())\n\n        # test only with image\n        transform = FixShapeResize(\n            width=200, height=150, clip_object_border=False)\n        
results = transform(self.data_info3)\n        self.assertTupleEqual(results['img'].shape[:2], (150, 200))\n\n        # test geometric transformation with homography matrix\n        transform = FixShapeResize(width=400, height=300)\n        results = transform(copy.deepcopy(data_info4))\n        self.assertTrue((bbox_project(\n            copy.deepcopy(self.data_info4['gt_bboxes']),\n            results['homography_matrix']) == results['gt_bboxes'].numpy()\n                         ).all())\n\n    def test_repr(self):\n        transform = FixShapeResize(width=2000, height=2000, keep_ratio=True)\n        self.assertEqual(\n            repr(transform), ('FixShapeResize(width=2000, height=2000, '\n                              'keep_ratio=True, '\n                              'clip_object_border=True), backend=cv2), '\n                              'interpolation=bilinear)'))\n\n\nclass TestRandomFlip(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.results1 = {\n            'img': np.random.random((224, 224, 3)),\n            'gt_bboxes': np.array([[0, 1, 100, 101]], dtype=np.float32),\n            'gt_masks':\n            BitmapMasks(rng.rand(1, 224, 224), height=224, width=224),\n            'gt_seg_map': np.random.random((224, 224))\n        }\n\n        self.results2 = {'img': self.results1['img']}\n\n    def test_transform(self):\n        # test with image, gt_bboxes, gt_masks, gt_seg_map\n        transform = RandomFlip(1.0)\n        results_update = transform.transform(copy.deepcopy(self.results1))\n        self.assertTrue(\n            (results_update['gt_bboxes'] == np.array([[124, 1, 224,\n                                                       101]])).all())\n        # test only with image\n        transform = RandomFlip(1.0)\n        results_update = transform.transform(copy.deepcopy(self.results2))\n        self.assertTrue(\n            (results_update['img'] == self.results2['img'][:, ::-1]).all())\n\n        # test geometric transformation with homography matrix\n        # (1) Horizontal Flip\n        transform = RandomFlip(1.0)\n        results_update = transform.transform(copy.deepcopy(self.results1))\n        bboxes = copy.deepcopy(self.results1['gt_bboxes'])\n        self.assertTrue((bbox_project(\n            bboxes,\n            results_update['homography_matrix']) == results_update['gt_bboxes']\n                         ).all())\n        # (2) Vertical Flip\n        transform = RandomFlip(1.0, direction='vertical')\n        results_update = transform.transform(copy.deepcopy(self.results1))\n        bboxes = copy.deepcopy(self.results1['gt_bboxes'])\n        self.assertTrue((bbox_project(\n            bboxes,\n            results_update['homography_matrix']) == results_update['gt_bboxes']\n                         ).all())\n        # (3) Diagonal Flip\n        transform = RandomFlip(1.0, direction='diagonal')\n        results_update = transform.transform(copy.deepcopy(self.results1))\n        bboxes = copy.deepcopy(self.results1['gt_bboxes'])\n        self.assertTrue((bbox_project(\n            bboxes,\n            results_update['homography_matrix']) == results_update['gt_bboxes']\n                         ).all())\n\n    def test_transform_use_box_type(self):\n        results1 = copy.deepcopy(self.results1)\n        
results1['gt_bboxes'] = HorizontalBoxes(results1['gt_bboxes'])\n        # test with image, gt_bboxes, gt_masks, gt_seg_map\n        transform = RandomFlip(1.0)\n        results_update = transform.transform(copy.deepcopy(results1))\n        self.assertTrue((results_update['gt_bboxes'].numpy() == np.array(\n            [[124, 1, 224, 101]])).all())\n\n        # test geometric transformation with homography matrix\n        # (1) Horizontal Flip\n        transform = RandomFlip(1.0)\n        results_update = transform.transform(copy.deepcopy(results1))\n        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())\n        self.assertTrue((bbox_project(bboxes,\n                                      results_update['homography_matrix']) ==\n                         results_update['gt_bboxes'].numpy()).all())\n        # (2) Vertical Flip\n        transform = RandomFlip(1.0, direction='vertical')\n        results_update = transform.transform(copy.deepcopy(results1))\n        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())\n        self.assertTrue((bbox_project(bboxes,\n                                      results_update['homography_matrix']) ==\n                         results_update['gt_bboxes'].numpy()).all())\n        # (3) Diagonal Flip\n        transform = RandomFlip(1.0, direction='diagonal')\n        results_update = transform.transform(copy.deepcopy(results1))\n        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())\n        self.assertTrue((bbox_project(bboxes,\n                                      results_update['homography_matrix']) ==\n                         results_update['gt_bboxes'].numpy()).all())\n\n    def test_repr(self):\n        transform = RandomFlip(0.1)\n        transform_str = str(transform)\n        self.assertIsInstance(transform_str, str)\n\n\nclass TestPad(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.results = {\n            'img': np.random.random((1333, 800, 3)),\n            'gt_masks':\n            BitmapMasks(rng.rand(4, 1333, 800), height=1333, width=800)\n        }\n\n    def test_transform(self):\n        # test pad img/gt_masks with size\n        transform = Pad(size=(1200, 2000))\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape[:2], (2000, 1200))\n        self.assertEqual(results['gt_masks'].masks.shape[1:], (2000, 1200))\n\n        # test pad img/gt_masks with size_divisor\n        transform = Pad(size_divisor=11)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape[:2], (1342, 803))\n        self.assertEqual(results['gt_masks'].masks.shape[1:], (1342, 803))\n\n        # test pad img/gt_masks with pad_to_square\n        transform = Pad(pad_to_square=True)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape[:2], (1333, 1333))\n        self.assertEqual(results['gt_masks'].masks.shape[1:], (1333, 1333))\n\n        # test pad img/gt_masks with pad_to_square and size_divisor\n        transform = Pad(pad_to_square=True, size_divisor=11)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape[:2], (1342, 1342))\n        self.assertEqual(results['gt_masks'].masks.shape[1:], (1342, 
1342))\n\n        # test pad img/gt_masks with pad_to_square and size_divisor\n        transform = Pad(pad_to_square=True, size_divisor=11)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img'].shape[:2], (1342, 1342))\n        self.assertEqual(results['gt_masks'].masks.shape[1:], (1342, 1342))\n\n    def test_repr(self):\n        transform = Pad(\n            pad_to_square=True, size_divisor=11, padding_mode='edge')\n        self.assertEqual(\n            repr(transform),\n            ('Pad(size=None, size_divisor=11, pad_to_square=True, '\n             \"pad_val={'img': 0, 'seg': 255}), padding_mode=edge)\"))\n\n\nclass TestMinIoURandomCrop(unittest.TestCase):\n\n    def test_transform(self):\n        results = dict()\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        results['img'] = img\n        results['img_shape'] = img.shape[:2]\n        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],\n                                         results['img_shape'][0])\n        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)\n        results['gt_bboxes'] = gt_bboxes\n        transform = MinIoURandomCrop()\n        results = transform.transform(copy.deepcopy(results))\n\n        self.assertEqual(results['gt_labels'].shape[0],\n                         results['gt_bboxes'].shape[0])\n        self.assertEqual(results['gt_labels'].dtype, np.int64)\n        self.assertEqual(results['gt_bboxes'].dtype, np.float32)\n\n        patch = np.array(\n            [0, 0, results['img_shape'][1], results['img_shape'][0]])\n        ious = bbox_overlaps(patch.reshape(-1, 4),\n                             results['gt_bboxes']).reshape(-1)\n        mode = transform.mode\n        if mode == 1:\n            self.assertTrue(np.equal(results['gt_bboxes'], gt_bboxes).all())\n        else:\n            self.assertTrue((ious >= mode).all())\n\n    def test_transform_use_box_type(self):\n        results = dict()\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        results['img'] = img\n        results['img_shape'] = img.shape[:2]\n        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],\n                                         results['img_shape'][0])\n        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)\n        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)\n        transform = MinIoURandomCrop()\n        results = transform.transform(copy.deepcopy(results))\n\n        self.assertEqual(results['gt_labels'].shape[0],\n                         results['gt_bboxes'].shape[0])\n        self.assertEqual(results['gt_labels'].dtype, np.int64)\n        self.assertEqual(results['gt_bboxes'].dtype, torch.float32)\n\n        patch = np.array(\n            [0, 0, results['img_shape'][1], results['img_shape'][0]])\n        ious = bbox_overlaps(\n            patch.reshape(-1, 4), results['gt_bboxes'].numpy()).reshape(-1)\n        mode = transform.mode\n        if mode == 1:\n            self.assertTrue((results['gt_bboxes'].numpy() == gt_bboxes).all())\n        else:\n            self.assertTrue((ious >= mode).all())\n\n    def test_repr(self):\n        transform = MinIoURandomCrop()\n        self.assertEqual(\n            repr(transform), ('MinIoURandomCrop'\n                              '(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), '\n                              'min_crop_size=0.3, '\n                      
        'bbox_clip_border=True)'))\n\n\nclass TestPhotoMetricDistortion(unittest.TestCase):\n\n    def test_transform(self):\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        transform = PhotoMetricDistortion()\n\n        # test uint8 input\n        results = dict()\n        results['img'] = img\n        results = transform.transform(copy.deepcopy(results))\n        self.assertEqual(results['img'].dtype, np.float32)\n\n        # test float32 input\n        results = dict()\n        results['img'] = img.astype(np.float32)\n        results = transform.transform(copy.deepcopy(results))\n        self.assertEqual(results['img'].dtype, np.float32)\n\n    def test_repr(self):\n        transform = PhotoMetricDistortion()\n        self.assertEqual(\n            repr(transform), ('PhotoMetricDistortion'\n                              '(brightness_delta=32, '\n                              'contrast_range=(0.5, 1.5), '\n                              'saturation_range=(0.5, 1.5), '\n                              'hue_delta=18)'))\n\n\nclass TestExpand(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.results = {\n            'img': np.random.random((224, 224, 3)),\n            'img_shape': (224, 224),\n            'gt_bboxes': np.array([[0, 1, 100, 101]]),\n            'gt_masks':\n            BitmapMasks(rng.rand(1, 224, 224), height=224, width=224),\n            'gt_seg_map': np.random.random((224, 224))\n        }\n\n    def test_transform(self):\n        transform = Expand()\n        results = transform.transform(copy.deepcopy(self.results))\n        self.assertEqual(\n            results['img_shape'],\n            (results['gt_masks'].height, results['gt_masks'].width))\n        self.assertEqual(results['img_shape'], results['gt_seg_map'].shape)\n\n    def test_transform_use_box_type(self):\n        results = copy.deepcopy(self.results)\n        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])\n        transform = Expand()\n        results = transform.transform(results)\n        self.assertEqual(\n            results['img_shape'],\n            (results['gt_masks'].height, results['gt_masks'].width))\n        self.assertEqual(results['img_shape'], results['gt_seg_map'].shape)\n\n    def test_repr(self):\n        transform = Expand()\n        self.assertEqual(\n            repr(transform), ('Expand'\n                              '(mean=(0, 0, 0), to_rgb=True, '\n                              'ratio_range=(1, 4), '\n                              'seg_ignore_label=None, '\n                              'prob=0.5)'))\n\n\nclass TestSegRescale(unittest.TestCase):\n\n    def setUp(self) -> None:\n        seg_map = np.random.randint(0, 255, size=(32, 32), dtype=np.int32)\n        self.results = {'gt_seg_map': seg_map}\n\n    def test_transform(self):\n        # test scale_factor != 1\n        transform = SegRescale(scale_factor=2)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['gt_seg_map'].shape[:2], (64, 64))\n        # test scale_factor = 1\n        transform = SegRescale(scale_factor=1)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['gt_seg_map'].shape[:2], (32, 32))\n\n    def 
test_repr(self):\n        transform = SegRescale(scale_factor=2)\n        self.assertEqual(\n            repr(transform), ('SegRescale(scale_factor=2, backend=cv2)'))\n\n\nclass TestRandomCrop(unittest.TestCase):\n\n    def test_init(self):\n        # test invalid crop_type\n        with self.assertRaisesRegex(ValueError, 'Invalid crop_type'):\n            RandomCrop(crop_size=(10, 10), crop_type='unknown')\n\n        crop_type_list = ['absolute', 'absolute_range']\n        for crop_type in crop_type_list:\n            # test h > 0 and w > 0\n            for crop_size in [(0, 0), (0, 1), (1, 0)]:\n                with self.assertRaises(AssertionError):\n                    RandomCrop(crop_size=crop_size, crop_type=crop_type)\n            # test type(h) = int and type(w) = int\n            for crop_size in [(1.0, 1), (1, 1.0), (1.0, 1.0)]:\n                with self.assertRaises(AssertionError):\n                    RandomCrop(crop_size=crop_size, crop_type=crop_type)\n\n        # test crop_size[0] <= crop_size[1]\n        with self.assertRaises(AssertionError):\n            RandomCrop(crop_size=(10, 5), crop_type='absolute_range')\n\n        # test h in (0, 1] and w in (0, 1]\n        crop_type_list = ['relative_range', 'relative']\n        for crop_type in crop_type_list:\n            for crop_size in [(0, 1), (1, 0), (1.1, 0.5), (0.5, 1.1)]:\n                with self.assertRaises(AssertionError):\n                    RandomCrop(crop_size=crop_size, crop_type=crop_type)\n\n    def test_transform(self):\n        # test relative and absolute crop\n        src_results = {\n            'img': np.random.randint(0, 255, size=(24, 32), dtype=np.int32)\n        }\n        target_shape = (12, 16)\n        for crop_type, crop_size in zip(['relative', 'absolute'], [(0.5, 0.5),\n                                                                   (16, 12)]):\n            transform = RandomCrop(crop_size=crop_size, crop_type=crop_type)\n            results = transform(copy.deepcopy(src_results))\n            print(results['img'].shape[:2])\n            self.assertEqual(results['img'].shape[:2], target_shape)\n\n        # test absolute_range crop\n        transform = RandomCrop(crop_size=(10, 20), crop_type='absolute_range')\n        results = transform(copy.deepcopy(src_results))\n        h, w = results['img'].shape\n        self.assertTrue(10 <= w <= 20)\n        self.assertTrue(10 <= h <= 20)\n        # test relative_range crop\n        transform = RandomCrop(\n            crop_size=(0.5, 0.5), crop_type='relative_range')\n        results = transform(copy.deepcopy(src_results))\n        h, w = results['img'].shape\n        self.assertTrue(16 <= w <= 32)\n        self.assertTrue(12 <= h <= 24)\n\n        # test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,\n        # gt_masks, gt_seg_map\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)\n        gt_bboxes_labels = np.array([0, 1], dtype=np.int64)\n        gt_ignore_flags = np.array([0, 1], dtype=bool)\n        gt_masks_ = np.zeros((2, 10, 10), np.uint8)\n        gt_masks_[0, 0:7, 0:7] = 1\n        gt_masks_[1, 2:7, 3:8] = 1\n        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)\n        gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        src_results = {\n            'img': img,\n            'gt_bboxes': gt_bboxes,\n            'gt_bboxes_labels': gt_bboxes_labels,\n            'gt_ignore_flags': 
gt_ignore_flags,\n            'gt_masks': gt_masks,\n            'gt_seg_map': gt_seg_map\n        }\n        transform = RandomCrop(\n            crop_size=(7, 5),\n            allow_negative_crop=False,\n            recompute_bbox=False,\n            bbox_clip_border=True)\n        results = transform(copy.deepcopy(src_results))\n        h, w = results['img'].shape\n        self.assertEqual(h, 5)\n        self.assertEqual(w, 7)\n        self.assertEqual(results['gt_bboxes'].shape[0], 2)\n        self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)\n        self.assertEqual(results['gt_ignore_flags'].shape[0], 2)\n        self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))\n\n        # test geometric transformation with homography matrix\n        bboxes = copy.deepcopy(src_results['gt_bboxes'])\n        self.assertTrue((bbox_project(bboxes, results['homography_matrix'],\n                                      (5, 7)) == results['gt_bboxes']).all())\n\n        # test recompute_bbox = True\n        gt_masks_ = np.zeros((2, 10, 10), np.uint8)\n        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)\n        gt_bboxes = np.array([[0.1, 0.1, 0.2, 0.2]])\n        src_results = {\n            'img': img,\n            'gt_bboxes': gt_bboxes,\n            'gt_masks': gt_masks\n        }\n        target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)\n        transform = RandomCrop(\n            crop_size=(10, 11),\n            allow_negative_crop=False,\n            recompute_bbox=True,\n            bbox_clip_border=True)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue((results['gt_bboxes'] == target_gt_bboxes).all())\n\n        # test bbox_clip_border = False\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(\n            crop_size=(10, 11),\n            allow_negative_crop=False,\n            recompute_bbox=True,\n            bbox_clip_border=False)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(\n            (results['gt_bboxes'] == src_results['gt_bboxes']).all())\n\n        # test the crop does not contain any gt-bbox\n        # allow_negative_crop = False\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=False)\n        results = transform(copy.deepcopy(src_results))\n        self.assertIsNone(results)\n\n        # allow_negative_crop = True\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=True)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(isinstance(results, dict))\n\n    def test_transform_use_box_type(self):\n        # test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,\n        # gt_masks, gt_seg_map\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)\n        gt_bboxes_labels = np.array([0, 1], dtype=np.int64)\n        gt_ignore_flags = np.array([0, 1], dtype=bool)\n        gt_masks_ = np.zeros((2, 10, 10), np.uint8)\n        gt_masks_[0, 0:7, 0:7] = 1\n        gt_masks_[1, 2:7, 3:8] = 1\n        gt_masks 
= BitmapMasks(gt_masks_.copy(), height=10, width=10)\n        gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        src_results = {\n            'img': img,\n            'gt_bboxes': HorizontalBoxes(gt_bboxes),\n            'gt_bboxes_labels': gt_bboxes_labels,\n            'gt_ignore_flags': gt_ignore_flags,\n            'gt_masks': gt_masks,\n            'gt_seg_map': gt_seg_map\n        }\n        transform = RandomCrop(\n            crop_size=(7, 5),\n            allow_negative_crop=False,\n            recompute_bbox=False,\n            bbox_clip_border=True)\n        results = transform(copy.deepcopy(src_results))\n        h, w = results['img'].shape\n        self.assertEqual(h, 5)\n        self.assertEqual(w, 7)\n        self.assertEqual(results['gt_bboxes'].shape[0], 2)\n        self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)\n        self.assertEqual(results['gt_ignore_flags'].shape[0], 2)\n        self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))\n\n        # test geometric transformation with homography matrix\n        bboxes = copy.deepcopy(src_results['gt_bboxes'].numpy())\n        print(bboxes, results['gt_bboxes'])\n        self.assertTrue(\n            (bbox_project(bboxes, results['homography_matrix'],\n                          (5, 7)) == results['gt_bboxes'].numpy()).all())\n\n        # test recompute_bbox = True\n        gt_masks_ = np.zeros((2, 10, 10), np.uint8)\n        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)\n        gt_bboxes = HorizontalBoxes(np.array([[0.1, 0.1, 0.2, 0.2]]))\n        src_results = {\n            'img': img,\n            'gt_bboxes': gt_bboxes,\n            'gt_masks': gt_masks\n        }\n        target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)\n        transform = RandomCrop(\n            crop_size=(10, 11),\n            allow_negative_crop=False,\n            recompute_bbox=True,\n            bbox_clip_border=True)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == target_gt_bboxes).all())\n\n        # test bbox_clip_border = False\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(\n            crop_size=(10, 10),\n            allow_negative_crop=False,\n            recompute_bbox=True,\n            bbox_clip_border=False)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(\n            (results['gt_bboxes'].numpy() == src_results['gt_bboxes'].numpy()\n             ).all())\n\n        # test the crop does not contain any gt-bbox\n        # allow_negative_crop = False\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=False)\n        results = transform(copy.deepcopy(src_results))\n        self.assertIsNone(results)\n\n        # allow_negative_crop = True\n        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)\n        gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))\n        src_results = {'img': img, 'gt_bboxes': gt_bboxes}\n        transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=True)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(isinstance(results, dict))\n\n    def test_repr(self):\n        crop_type = 'absolute'\n        
crop_size = (10, 5)\n        allow_negative_crop = False\n        recompute_bbox = True\n        bbox_clip_border = False\n        transform = RandomCrop(\n            crop_size=crop_size,\n            crop_type=crop_type,\n            allow_negative_crop=allow_negative_crop,\n            recompute_bbox=recompute_bbox,\n            bbox_clip_border=bbox_clip_border)\n        self.assertEqual(\n            repr(transform),\n            f'RandomCrop(crop_size={crop_size}, crop_type={crop_type}, '\n            f'allow_negative_crop={allow_negative_crop}, '\n            f'recompute_bbox={recompute_bbox}, '\n            f'bbox_clip_border={bbox_clip_border})')\n\n\nclass TestCutOut(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        self.results = {'img': img}\n\n    def test_transform(self):\n        # test n_holes\n        with self.assertRaises(AssertionError):\n            transform = CutOut(n_holes=(5, 3), cutout_shape=(8, 8))\n        with self.assertRaises(AssertionError):\n            transform = CutOut(n_holes=(3, 4, 5), cutout_shape=(8, 8))\n\n        # test cutout_shape and cutout_ratio\n        with self.assertRaises(AssertionError):\n            transform = CutOut(n_holes=1, cutout_shape=8)\n        with self.assertRaises(AssertionError):\n            transform = CutOut(n_holes=1, cutout_ratio=0.2)\n\n        # either of cutout_shape and cutout_ratio should be given\n        with self.assertRaises(AssertionError):\n            transform = CutOut(n_holes=1)\n        with self.assertRaises(AssertionError):\n            transform = CutOut(\n                n_holes=1, cutout_shape=(2, 2), cutout_ratio=(0.4, 0.4))\n\n        transform = CutOut(n_holes=1, cutout_shape=(10, 10))\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].sum() < self.results['img'].sum())\n\n        transform = CutOut(\n            n_holes=(2, 4),\n            cutout_shape=[(10, 10), (15, 15)],\n            fill_in=(255, 255, 255))\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].sum() > self.results['img'].sum())\n\n        transform = CutOut(\n            n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255))\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].sum() > self.results['img'].sum())\n\n    def test_repr(self):\n        transform = CutOut(n_holes=1, cutout_shape=(10, 10))\n        self.assertEqual(\n            repr(transform), ('CutOut(n_holes=(1, 1), '\n                              'cutout_shape=[(10, 10)], '\n                              'fill_in=(0, 0, 0))'))\n        transform = CutOut(\n            n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255))\n        self.assertEqual(\n            repr(transform), ('CutOut(n_holes=(1, 1), '\n                              'cutout_ratio=[(0.8, 0.8)], '\n                              'fill_in=(255, 255, 255))'))\n\n\nclass TestMosaic(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        
rng = np.random.RandomState(0)\n        self.results = {\n            'img':\n            np.random.random((224, 224, 3)),\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],\n                     dtype=np.float32),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=bool),\n            'gt_masks':\n            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),\n        }\n\n    def test_transform(self):\n        # test assertion for invalid img_scale\n        with self.assertRaises(AssertionError):\n            transform = Mosaic(img_scale=640)\n\n        # test assertion for invalid probability\n        with self.assertRaises(AssertionError):\n            transform = Mosaic(prob=1.5)\n\n        transform = Mosaic(img_scale=(12, 10))\n        # test assertion for invalid mix_results\n        with self.assertRaises(AssertionError):\n            results = transform(copy.deepcopy(self.results))\n\n        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].shape[:2] == (20, 24))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == np.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_transform_with_no_gt(self):\n        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)\n        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)\n        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)\n        transform = Mosaic(img_scale=(12, 10))\n        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3\n        results = transform(copy.deepcopy(self.results))\n        self.assertIsInstance(results, dict)\n        self.assertTrue(results['img'].shape[:2] == (20, 24))\n        self.assertTrue(\n            results['gt_bboxes_labels'].shape[0] == results['gt_bboxes'].\n            shape[0] == results['gt_ignore_flags'].shape[0] == 0)\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == np.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_transform_use_box_type(self):\n        transform = Mosaic(img_scale=(12, 10))\n        results = copy.deepcopy(self.results)\n        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])\n        results['mix_results'] = [results] * 3\n        results = transform(results)\n        self.assertTrue(results['img'].shape[:2] == (20, 24))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_repr(self):\n        transform = Mosaic(img_scale=(640, 640), )\n        self.assertEqual(\n            repr(transform), ('Mosaic(img_scale=(640, 640), '\n                              'center_ratio_range=(0.5, 1.5), '\n                              'pad_val=114.0, '\n                          
    'prob=1.0)'))\n\n\nclass TestMixUp(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        rng = np.random.RandomState(0)\n        self.results = {\n            'img':\n            np.random.random((224, 224, 3)),\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],\n                     dtype=np.float32),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=bool),\n            'gt_masks':\n            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),\n        }\n\n    def test_transform(self):\n        # test assertion for invalid img_scale\n        with self.assertRaises(AssertionError):\n            transform = MixUp(img_scale=640)\n\n        transform = MixUp(img_scale=(12, 10))\n        # test assertion for invalid mix_results\n        with self.assertRaises(AssertionError):\n            results = transform(copy.deepcopy(self.results))\n\n        with self.assertRaises(AssertionError):\n            self.results['mix_results'] = [copy.deepcopy(self.results)] * 2\n            results = transform(copy.deepcopy(self.results))\n\n        self.results['mix_results'] = [copy.deepcopy(self.results)]\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].shape[:2] == (224, 224))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == np.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_transform_use_box_type(self):\n        results = copy.deepcopy(self.results)\n        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])\n\n        transform = MixUp(img_scale=(12, 10))\n        results['mix_results'] = [results]\n        results = transform(results)\n        self.assertTrue(results['img'].shape[:2] == (224, 224))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_repr(self):\n        transform = MixUp(\n            img_scale=(640, 640),\n            ratio_range=(0.8, 1.6),\n            pad_val=114.0,\n        )\n        self.assertEqual(\n            repr(transform), ('MixUp(dynamic_scale=(640, 640), '\n                              'ratio_range=(0.8, 1.6), '\n                              'flip_ratio=0.5, '\n                              'pad_val=114.0, '\n                              'max_iters=15, '\n                              'bbox_clip_border=True)'))\n\n\nclass TestRandomAffine(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.results = {\n            'img':\n            np.random.random((224, 224, 
3)),\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],\n                     dtype=np.float32),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=bool),\n        }\n\n    def test_transform(self):\n        # test assertion for invalid translate_ratio\n        with self.assertRaises(AssertionError):\n            transform = RandomAffine(max_translate_ratio=1.5)\n\n        # test assertion for invalid scaling_ratio_range\n        with self.assertRaises(AssertionError):\n            transform = RandomAffine(scaling_ratio_range=(1.5, 0.5))\n\n        with self.assertRaises(AssertionError):\n            transform = RandomAffine(scaling_ratio_range=(0, 0.5))\n\n        transform = RandomAffine()\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].shape[:2] == (224, 224))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == np.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_transform_use_box_type(self):\n        results = copy.deepcopy(self.results)\n        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])\n\n        transform = RandomAffine()\n        results = transform(copy.deepcopy(results))\n        self.assertTrue(results['img'].shape[:2] == (224, 224))\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_repr(self):\n        transform = RandomAffine(\n            scaling_ratio_range=(0.1, 2),\n            border=(-320, -320),\n        )\n        self.assertEqual(\n            repr(transform), ('RandomAffine(max_rotate_degree=10.0, '\n                              'max_translate_ratio=0.1, '\n                              'scaling_ratio_range=(0.1, 2), '\n                              'max_shear_degree=2.0, '\n                              'border=(-320, -320), '\n                              'border_val=(114, 114, 114), '\n                              'bbox_clip_border=True)'))\n\n\nclass TestYOLOXHSVRandomAug(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        self.results = {\n            'img':\n            img,\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],\n                     dtype=np.float32),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=bool),\n        }\n\n    def test_transform(self):\n        transform = YOLOXHSVRandomAug()\n        results = transform(copy.deepcopy(self.results))\n        
self.assertTrue(\n            results['img'].shape[:2] == self.results['img'].shape[:2])\n        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==\n                        results['gt_bboxes'].shape[0])\n        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)\n        self.assertTrue(results['gt_bboxes'].dtype == np.float32)\n        self.assertTrue(results['gt_ignore_flags'].dtype == bool)\n\n    def test_repr(self):\n        transform = YOLOXHSVRandomAug()\n        self.assertEqual(\n            repr(transform), ('YOLOXHSVRandomAug(hue_delta=5, '\n                              'saturation_delta=30, '\n                              'value_delta=30)'))\n\n\nclass TestRandomCenterCropPad(unittest.TestCase):\n\n    def test_init(self):\n        # test assertion for invalid crop_size while test_mode=False\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=(-1, 0), test_mode=False, test_pad_mode=None)\n\n        # test assertion for invalid ratios while test_mode=False\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=(511, 511),\n                ratios=(1.0, 1.0),\n                test_mode=False,\n                test_pad_mode=None)\n\n        # test assertion for invalid mean, std and to_rgb\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=(511, 511),\n                mean=None,\n                std=None,\n                to_rgb=None,\n                test_mode=False,\n                test_pad_mode=None)\n\n        # test assertion for invalid crop_size while test_mode=True\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=(511, 511),\n                ratios=None,\n                border=None,\n                mean=[123.675, 116.28, 103.53],\n                std=[58.395, 57.12, 57.375],\n                to_rgb=True,\n                test_mode=True,\n                test_pad_mode=('logical_or', 127))\n\n        # test assertion for invalid ratios while test_mode=True\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=None,\n                ratios=(0.9, 1.0, 1.1),\n                border=None,\n                mean=[123.675, 116.28, 103.53],\n                std=[58.395, 57.12, 57.375],\n                to_rgb=True,\n                test_mode=True,\n                test_pad_mode=('logical_or', 127))\n\n        # test assertion for invalid border while test_mode=True\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=None,\n                ratios=None,\n                border=128,\n                mean=[123.675, 116.28, 103.53],\n                std=[58.395, 57.12, 57.375],\n                to_rgb=True,\n                test_mode=True,\n                test_pad_mode=('logical_or', 127))\n\n        # test assertion for invalid test_pad_mode while test_mode=True\n        with self.assertRaises(AssertionError):\n            RandomCenterCropPad(\n                crop_size=None,\n                ratios=None,\n                border=None,\n                mean=[123.675, 116.28, 103.53],\n                std=[58.395, 57.12, 57.375],\n                to_rgb=True,\n                test_mode=True,\n                test_pad_mode=('do_nothing', 100))\n\n    def test_transform(self):\n        results = dict(\n            
img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))\n\n        load = LoadImageFromFile(to_float32=True)\n        results = load(results)\n        test_results = copy.deepcopy(results)\n\n        h, w = results['img_shape']\n        gt_bboxes = create_random_bboxes(4, w, h)\n        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)\n        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)\n        results['gt_bboxes'] = gt_bboxes\n        results['gt_bboxes_labels'] = gt_bboxes_labels\n        results['gt_ignore_flags'] = gt_ignore_flags\n        crop_module = RandomCenterCropPad(\n            crop_size=(w - 20, h - 20),\n            ratios=(1.0, ),\n            border=128,\n            mean=[123.675, 116.28, 103.53],\n            std=[58.395, 57.12, 57.375],\n            to_rgb=True,\n            test_mode=False,\n            test_pad_mode=None)\n        train_results = crop_module(results)\n        assert train_results['img'].shape[:2] == (h - 20, w - 20)\n        # All bboxes should be reserved after crop\n        assert train_results['img_shape'][:2] == (h - 20, w - 20)\n        assert train_results['gt_bboxes'].shape[0] == 4\n        assert train_results['gt_bboxes'].dtype == np.float32\n\n        crop_module = RandomCenterCropPad(\n            crop_size=None,\n            ratios=None,\n            border=None,\n            mean=[123.675, 116.28, 103.53],\n            std=[58.395, 57.12, 57.375],\n            to_rgb=True,\n            test_mode=True,\n            test_pad_mode=('logical_or', 127))\n        test_results = crop_module(test_results)\n        assert test_results['img'].shape[:2] == (h | 127, w | 127)\n        assert test_results['img_shape'][:2] == (h | 127, w | 127)\n        assert 'border' in test_results\n\n    def test_transform_use_box_type(self):\n        results = dict(\n            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))\n\n        load = LoadImageFromFile(to_float32=True)\n        results = load(results)\n        test_results = copy.deepcopy(results)\n\n        h, w = results['img_shape']\n        gt_bboxes = create_random_bboxes(4, w, h)\n        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)\n        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)\n        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)\n        results['gt_bboxes_labels'] = gt_bboxes_labels\n        results['gt_ignore_flags'] = gt_ignore_flags\n        crop_module = RandomCenterCropPad(\n            crop_size=(w - 20, h - 20),\n            ratios=(1.0, ),\n            border=128,\n            mean=[123.675, 116.28, 103.53],\n            std=[58.395, 57.12, 57.375],\n            to_rgb=True,\n            test_mode=False,\n            test_pad_mode=None)\n        train_results = crop_module(results)\n        assert train_results['img'].shape[:2] == (h - 20, w - 20)\n        # All bboxes should be reserved after crop\n        assert train_results['img_shape'][:2] == (h - 20, w - 20)\n        assert train_results['gt_bboxes'].shape[0] == 4\n        assert train_results['gt_bboxes'].dtype == torch.float32\n\n        crop_module = RandomCenterCropPad(\n            crop_size=None,\n            ratios=None,\n            border=None,\n            mean=[123.675, 116.28, 103.53],\n            std=[58.395, 57.12, 57.375],\n            to_rgb=True,\n            test_mode=True,\n            test_pad_mode=('logical_or', 127))\n        test_results = crop_module(test_results)\n        assert test_results['img'].shape[:2] == (h | 127, w | 
127)\n        assert test_results['img_shape'][:2] == (h | 127, w | 127)\n        assert 'border' in test_results\n\n\nclass TestCopyPaste(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        h, w, _ = img.shape\n        dst_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h],\n                               [0.5 * w, 0.5 * h, 0.6 * w, 0.6 * h]],\n                              dtype=np.float32)\n        src_bboxes = np.array([[0.1 * w, 0.1 * h, 0.3 * w, 0.5 * h],\n                               [0.4 * w, 0.4 * h, 0.7 * w, 0.7 * h],\n                               [0.8 * w, 0.8 * h, 0.9 * w, 0.9 * h]],\n                              dtype=np.float32)\n\n        self.dst_results = {\n            'img': img.copy(),\n            'gt_bboxes': dst_bboxes,\n            'gt_bboxes_labels': np.ones(dst_bboxes.shape[0], dtype=np.int64),\n            'gt_masks': create_full_masks(dst_bboxes, w, h),\n            'gt_ignore_flags': np.array([0, 1], dtype=bool),\n        }\n        self.src_results = {\n            'img': img.copy(),\n            'gt_bboxes': src_bboxes,\n            'gt_bboxes_labels':\n            np.ones(src_bboxes.shape[0], dtype=np.int64) * 2,\n            'gt_masks': create_full_masks(src_bboxes, w, h),\n            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),\n        }\n\n    def test_transform(self):\n        transform = CopyPaste(selected=False)\n        # test assertion for invalid mix_results\n        with self.assertRaises(AssertionError):\n            results = transform(copy.deepcopy(self.dst_results))\n\n        results = copy.deepcopy(self.dst_results)\n        results['mix_results'] = [copy.deepcopy(self.src_results)]\n        results = transform(results)\n\n        self.assertEqual(results['img'].shape[:2],\n                         self.dst_results['img'].shape[:2])\n\n        # one object of destination image is totally occluded\n        self.assertEqual(\n            results['gt_bboxes'].shape[0],\n            self.dst_results['gt_bboxes'].shape[0] +\n            self.src_results['gt_bboxes'].shape[0] - 1)\n        self.assertEqual(\n            results['gt_bboxes_labels'].shape[0],\n            self.dst_results['gt_bboxes_labels'].shape[0] +\n            self.src_results['gt_bboxes_labels'].shape[0] - 1)\n        self.assertEqual(\n            results['gt_masks'].masks.shape[0],\n            self.dst_results['gt_masks'].masks.shape[0] +\n            self.src_results['gt_masks'].masks.shape[0] - 1)\n        self.assertEqual(\n            results['gt_ignore_flags'].shape[0],\n            self.dst_results['gt_ignore_flags'].shape[0] +\n            self.src_results['gt_ignore_flags'].shape[0] - 1)\n\n        # the object of destination image is partially occluded\n        ori_bbox = self.dst_results['gt_bboxes'][0]\n        occ_bbox = results['gt_bboxes'][0]\n        ori_mask = self.dst_results['gt_masks'].masks[0]\n        occ_mask = results['gt_masks'].masks[0]\n        self.assertTrue(ori_mask.sum() > occ_mask.sum())\n        self.assertTrue(\n            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)\n            or occ_mask.sum() > transform.mask_occluded_thr)\n\n        # test copypaste with selected objects\n        
transform = CopyPaste()\n        results = copy.deepcopy(self.dst_results)\n        results['mix_results'] = [copy.deepcopy(self.src_results)]\n        results = transform(results)\n\n        # test copypaste with an empty source image\n        results = copy.deepcopy(self.dst_results)\n        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]\n        results['mix_results'] = [{\n            'img':\n            self.src_results['img'].copy(),\n            'gt_bboxes':\n            self.src_results['gt_bboxes'][valid_inds],\n            'gt_bboxes_labels':\n            self.src_results['gt_bboxes_labels'][valid_inds],\n            'gt_masks':\n            self.src_results['gt_masks'][valid_inds],\n            'gt_ignore_flags':\n            self.src_results['gt_ignore_flags'][valid_inds],\n        }]\n        results = transform(results)\n\n    def test_transform_use_box_type(self):\n        src_results = copy.deepcopy(self.src_results)\n        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])\n        dst_results = copy.deepcopy(self.dst_results)\n        dst_results['gt_bboxes'] = HorizontalBoxes(dst_results['gt_bboxes'])\n        transform = CopyPaste(selected=False)\n\n        results = copy.deepcopy(dst_results)\n        results['mix_results'] = [copy.deepcopy(src_results)]\n        results = transform(results)\n\n        self.assertEqual(results['img'].shape[:2],\n                         self.dst_results['img'].shape[:2])\n\n        # one object of destination image is totally occluded\n        self.assertEqual(\n            results['gt_bboxes'].shape[0],\n            self.dst_results['gt_bboxes'].shape[0] +\n            self.src_results['gt_bboxes'].shape[0] - 1)\n        self.assertEqual(\n            results['gt_bboxes_labels'].shape[0],\n            self.dst_results['gt_bboxes_labels'].shape[0] +\n            self.src_results['gt_bboxes_labels'].shape[0] - 1)\n        self.assertEqual(\n            results['gt_masks'].masks.shape[0],\n            self.dst_results['gt_masks'].masks.shape[0] +\n            self.src_results['gt_masks'].masks.shape[0] - 1)\n        self.assertEqual(\n            results['gt_ignore_flags'].shape[0],\n            self.dst_results['gt_ignore_flags'].shape[0] +\n            self.src_results['gt_ignore_flags'].shape[0] - 1)\n\n        # the object of destination image is partially occluded\n        ori_bbox = dst_results['gt_bboxes'][0].numpy()\n        occ_bbox = results['gt_bboxes'][0].numpy()\n        ori_mask = dst_results['gt_masks'].masks[0]\n        occ_mask = results['gt_masks'].masks[0]\n        self.assertTrue(ori_mask.sum() > occ_mask.sum())\n        self.assertTrue(\n            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)\n            or occ_mask.sum() > transform.mask_occluded_thr)\n\n        # test copypaste with selected objects\n        transform = CopyPaste()\n        results = copy.deepcopy(dst_results)\n        results['mix_results'] = [copy.deepcopy(src_results)]\n        results = transform(results)\n\n        # test copypaste with an empty source image\n        results = copy.deepcopy(dst_results)\n        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]\n        results['mix_results'] = [{\n            'img':\n            src_results['img'].copy(),\n            'gt_bboxes':\n            src_results['gt_bboxes'][valid_inds],\n            'gt_bboxes_labels':\n            src_results['gt_bboxes_labels'][valid_inds],\n            'gt_masks':\n            
src_results['gt_masks'][valid_inds],\n            'gt_ignore_flags':\n            src_results['gt_ignore_flags'][valid_inds],\n        }]\n        results = transform(results)\n\n    def test_repr(self):\n        transform = CopyPaste()\n        self.assertEqual(\n            repr(transform), ('CopyPaste(max_num_pasted=100, '\n                              'bbox_occluded_thr=10, '\n                              'mask_occluded_thr=300, '\n                              'selected=True)'))\n\n\nclass TestAlbu(unittest.TestCase):\n\n    @unittest.skipIf(albumentations is None, 'albumentations is not installed')\n    def test_transform(self):\n        results = dict(\n            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))\n\n        # Define simple pipeline\n        load = dict(type='LoadImageFromFile')\n        load = TRANSFORMS.build(load)\n\n        albu_transform = dict(\n            type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])\n        albu_transform = TRANSFORMS.build(albu_transform)\n\n        # Execute transforms\n        results = load(results)\n        results = albu_transform(results)\n\n        self.assertEqual(results['img'].dtype, np.uint8)\n\n        # test bbox\n        albu_transform = dict(\n            type='Albu',\n            transforms=[dict(type='ChannelShuffle', p=1)],\n            bbox_params=dict(\n                type='BboxParams',\n                format='pascal_voc',\n                label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),\n            keymap={\n                'img': 'image',\n                'gt_bboxes': 'bboxes'\n            })\n        albu_transform = TRANSFORMS.build(albu_transform)\n        results = {\n            'img':\n            np.random.random((224, 224, 3)),\n            'img_shape': (224, 224),\n            'gt_bboxes_labels':\n            np.array([1, 2, 3], dtype=np.int64),\n            'gt_bboxes':\n            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],\n                     dtype=np.float32),\n            'gt_ignore_flags':\n            np.array([0, 0, 1], dtype=bool),\n        }\n        results = albu_transform(results)\n        self.assertEqual(results['img'].dtype, np.float64)\n        self.assertEqual(results['gt_bboxes'].dtype, np.float32)\n        self.assertEqual(results['gt_ignore_flags'].dtype, bool)\n        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)\n\n    @unittest.skipIf(albumentations is None, 'albumentations is not installed')\n    def test_repr(self):\n        albu_transform = dict(\n            type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])\n        albu_transform = TRANSFORMS.build(albu_transform)\n\n        self.assertEqual(\n            repr(albu_transform), 'Albu(transforms=['\n            '{\\'type\\': \\'ChannelShuffle\\', '\n            '\\'p\\': 1}])')\n\n\nclass TestCorrupt(unittest.TestCase):\n\n    def test_transform(self):\n        results = dict(\n            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))\n\n        # Define simple pipeline\n        load = dict(type='LoadImageFromFile')\n        load = TRANSFORMS.build(load)\n\n        corrupt_transform = dict(type='Corrupt', corruption='gaussian_blur')\n        corrupt_transform = TRANSFORMS.build(corrupt_transform)\n\n        # Execute transforms\n        results = load(results)\n        results = corrupt_transform(results)\n\n        self.assertEqual(results['img'].dtype, np.uint8)\n\n    def test_repr(self):\n        corrupt_transform = 
dict(type='Corrupt', corruption='gaussian_blur')\n        corrupt_transform = TRANSFORMS.build(corrupt_transform)\n\n        self.assertEqual(\n            repr(corrupt_transform), 'Corrupt(corruption=gaussian_blur, '\n            'severity=1)')\n\n\nclass TestRandomShift(unittest.TestCase):\n\n    def test_init(self):\n        # test assertion for invalid shift_ratio\n        with self.assertRaises(AssertionError):\n            RandomShift(prob=1.5)\n\n        # test assertion for invalid max_shift_px\n        with self.assertRaises(AssertionError):\n            RandomShift(max_shift_px=-1)\n\n    def test_transform(self):\n\n        results = dict()\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        results['img'] = img\n        h, w, _ = img.shape\n        gt_bboxes = create_random_bboxes(8, w, h)\n        results['gt_bboxes_labels'] = np.ones(\n            gt_bboxes.shape[0], dtype=np.int64)\n        results['gt_bboxes'] = gt_bboxes\n        transform = RandomShift(prob=1.0)\n        results = transform(results)\n\n        self.assertEqual(results['img'].shape[:2], (h, w))\n        self.assertEqual(results['gt_bboxes_labels'].shape[0],\n                         results['gt_bboxes'].shape[0])\n        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)\n        self.assertEqual(results['gt_bboxes'].dtype, np.float32)\n\n    def test_transform_use_box_type(self):\n\n        results = dict()\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')\n        results['img'] = img\n        h, w, _ = img.shape\n        gt_bboxes = create_random_bboxes(8, w, h)\n        results['gt_bboxes_labels'] = np.ones(\n            gt_bboxes.shape[0], dtype=np.int64)\n        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)\n        transform = RandomShift(prob=1.0)\n        results = transform(results)\n\n        self.assertEqual(results['img'].shape[:2], (h, w))\n        self.assertEqual(results['gt_bboxes_labels'].shape[0],\n                         results['gt_bboxes'].shape[0])\n        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)\n        self.assertEqual(results['gt_bboxes'].dtype, torch.float32)\n\n    def test_repr(self):\n        transform = RandomShift()\n        self.assertEqual(\n            repr(transform), ('RandomShift(prob=0.5, '\n                              'max_shift_px=32, '\n                              'filter_thr_px=1)'))\n\n\nclass TestRandomErasing(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.results = construct_toy_data(poly2mask=True)\n\n    def test_transform(self):\n        transform = RandomErasing(\n            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].sum() < self.results['img'].sum())\n\n        transform = RandomErasing(\n            n_patches=1, ratio=0.999, img_border_value=255)\n        results = transform(copy.deepcopy(self.results))\n        self.assertTrue(results['img'].sum() > self.results['img'].sum())\n        # test empty results\n        empty_results = copy.deepcopy(self.results)\n        empty_results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)\n        
empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)\n        empty_results['gt_masks'] = empty_results['gt_masks'][False]\n        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)\n        empty_results['gt_seg_map'] = np.ones_like(\n            empty_results['gt_seg_map']) * 255\n        results = transform(copy.deepcopy(empty_results))\n        self.assertTrue(results['img'].sum() > self.results['img'].sum())\n\n    def test_transform_use_box_type(self):\n        src_results = copy.deepcopy(self.results)\n        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])\n\n        transform = RandomErasing(\n            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(results['img'].sum() < src_results['img'].sum())\n\n        transform = RandomErasing(\n            n_patches=1, ratio=0.999, img_border_value=255)\n        results = transform(copy.deepcopy(src_results))\n        self.assertTrue(results['img'].sum() > src_results['img'].sum())\n        # test empty results\n        empty_results = copy.deepcopy(src_results)\n        empty_results['gt_bboxes'] = HorizontalBoxes([], dtype=torch.float32)\n        empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)\n        empty_results['gt_masks'] = empty_results['gt_masks'][False]\n        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)\n        empty_results['gt_seg_map'] = np.ones_like(\n            empty_results['gt_seg_map']) * 255\n        results = transform(copy.deepcopy(empty_results))\n        self.assertTrue(results['img'].sum() > src_results['img'].sum())\n\n    def test_repr(self):\n        transform = RandomErasing(n_patches=(1, 5), ratio=(0, 0.2))\n        self.assertEqual(\n            repr(transform), ('RandomErasing(n_patches=(1, 5), '\n                              'ratio=(0, 0.2), '\n                              'squared=True, '\n                              'bbox_erased_thr=0.9, '\n                              'img_border_value=128, '\n                              'mask_border_value=0, '\n                              'seg_ignore_label=255)'))\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/test_wrappers.py",
    "content": "import copy\nimport os.path as osp\nimport unittest\n\nfrom mmcv.transforms import Compose\n\nfrom mmdet.datasets.transforms import MultiBranch, RandomOrder\nfrom mmdet.utils import register_all_modules\nfrom .utils import construct_toy_data\n\nregister_all_modules()\n\n\nclass TestMultiBranch(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        data_prefix = osp.join(osp.dirname(__file__), '../../data')\n        img_path = osp.join(data_prefix, 'color.jpg')\n        seg_map = osp.join(data_prefix, 'gray.jpg')\n        self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',\n                          'scale_factor', 'flip', 'flip_direction',\n                          'homography_matrix')\n        self.results = {\n            'img_path':\n            img_path,\n            'img_id':\n            12345,\n            'img_shape': (300, 400),\n            'seg_map_path':\n            seg_map,\n            'instances': [{\n                'bbox': [0, 0, 10, 20],\n                'bbox_label': 1,\n                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [10, 10, 110, 120],\n                'bbox_label': 2,\n                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n                'ignore_flag': 0\n            }, {\n                'bbox': [50, 50, 60, 80],\n                'bbox_label': 2,\n                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n                'ignore_flag': 1\n            }]\n        }\n        self.branch_field = ['sup', 'sup_teacher', 'sup_student']\n        self.weak_pipeline = [\n            dict(type='ShearX'),\n            dict(type='PackDetInputs', meta_keys=self.meta_keys)\n        ]\n        self.strong_pipeline = [\n            dict(type='ShearX'),\n            dict(type='ShearY'),\n            dict(type='PackDetInputs', meta_keys=self.meta_keys)\n        ]\n        self.labeled_pipeline = [\n            dict(type='LoadImageFromFile'),\n            dict(\n                type='LoadAnnotations',\n                with_bbox=True,\n                with_mask=True,\n                with_seg=True),\n            dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n            dict(type='RandomFlip', prob=0.5),\n            dict(\n                type='MultiBranch',\n                branch_field=self.branch_field,\n                sup_teacher=self.weak_pipeline,\n                sup_student=self.strong_pipeline),\n        ]\n        self.unlabeled_pipeline = [\n            dict(type='LoadImageFromFile'),\n            dict(type='Resize', scale=(1333, 800), keep_ratio=True),\n            dict(type='RandomFlip', prob=0.5),\n            dict(\n                type='MultiBranch',\n                branch_field=self.branch_field,\n                unsup_teacher=self.weak_pipeline,\n                unsup_student=self.strong_pipeline),\n        ]\n\n    def test_transform(self):\n        labeled_pipeline = Compose(self.labeled_pipeline)\n        labeled_results = labeled_pipeline(copy.deepcopy(self.results))\n        unlabeled_pipeline = Compose(self.unlabeled_pipeline)\n        unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))\n\n        # test branch sup_teacher and sup_student\n        sup_branches = ['sup_teacher', 'sup_student']\n        
for branch in sup_branches:\n            self.assertIn(branch, labeled_results['data_samples'])\n            self.assertIn('homography_matrix',\n                          labeled_results['data_samples'][branch])\n            self.assertIn('labels',\n                          labeled_results['data_samples'][branch].gt_instances)\n            self.assertIn('bboxes',\n                          labeled_results['data_samples'][branch].gt_instances)\n            self.assertIn('masks',\n                          labeled_results['data_samples'][branch].gt_instances)\n            self.assertIn('gt_sem_seg',\n                          labeled_results['data_samples'][branch])\n        # test branch unsup_teacher and unsup_student\n        unsup_branches = ['unsup_teacher', 'unsup_student']\n        for branch in unsup_branches:\n            self.assertIn(branch, unlabeled_results['data_samples'])\n            self.assertIn('homography_matrix',\n                          unlabeled_results['data_samples'][branch])\n            self.assertNotIn(\n                'labels',\n                unlabeled_results['data_samples'][branch].gt_instances)\n            self.assertNotIn(\n                'bboxes',\n                unlabeled_results['data_samples'][branch].gt_instances)\n            self.assertNotIn(\n                'masks',\n                unlabeled_results['data_samples'][branch].gt_instances)\n            self.assertNotIn('gt_sem_seg',\n                             unlabeled_results['data_samples'][branch])\n\n    def test_repr(self):\n        pipeline = [dict(type='PackDetInputs', meta_keys=())]\n        transform = MultiBranch(\n            branch_field=self.branch_field, sup=pipeline, unsup=pipeline)\n        self.assertEqual(\n            repr(transform),\n            (\"MultiBranch(branch_pipelines=['sup', 'unsup'])\"))\n\n\nclass TestRandomOrder(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.results = construct_toy_data(poly2mask=True)\n        self.pipeline = [\n            dict(type='Sharpness'),\n            dict(type='Contrast'),\n            dict(type='Brightness'),\n            dict(type='Rotate'),\n            dict(type='ShearX'),\n            dict(type='TranslateY')\n        ]\n\n    def test_transform(self):\n        transform = RandomOrder(self.pipeline)\n        results = transform(copy.deepcopy(self.results))\n        self.assertEqual(results['img_shape'], self.results['img_shape'])\n        self.assertEqual(results['gt_bboxes'].shape,\n                         self.results['gt_bboxes'].shape)\n        self.assertEqual(results['gt_bboxes_labels'],\n                         self.results['gt_bboxes_labels'])\n        self.assertEqual(results['gt_ignore_flags'],\n                         self.results['gt_ignore_flags'])\n        self.assertEqual(results['gt_masks'].masks.shape,\n                         self.results['gt_masks'].masks.shape)\n        self.assertEqual(results['gt_seg_map'].shape,\n                         self.results['gt_seg_map'].shape)\n\n    def test_repr(self):\n        transform = RandomOrder(self.pipeline)\n        self.assertEqual(\n            repr(transform), ('RandomOrder(Sharpness, Contrast, '\n                              'Brightness, Rotate, ShearX, TranslateY, )'))\n"
  },
  {
    "path": "tests/test_datasets/test_transforms/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.structures.bbox import BaseBoxes, HorizontalBoxes\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\n\ndef create_random_bboxes(num_bboxes, img_w, img_h):\n    bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))\n    bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))\n    bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)\n    bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(\n        np.float32)\n    return bboxes\n\n\ndef create_full_masks(gt_bboxes, img_w, img_h):\n    xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]\n    xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]\n    gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)\n    for i in range(len(gt_bboxes)):\n        gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1\n    gt_masks = BitmapMasks(gt_masks, img_h, img_w)\n    return gt_masks\n\n\ndef construct_toy_data(poly2mask, use_box_type=False):\n    img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],\n                   dtype=np.uint8)\n    img = np.stack([img, img, img], axis=-1)\n    results = dict()\n    results['img'] = img\n    results['img_shape'] = img.shape[:2]\n    if use_box_type:\n        results['gt_bboxes'] = HorizontalBoxes(\n            np.array([[1, 0, 2, 2]], dtype=np.float32))\n    else:\n        results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)\n    results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)\n    if poly2mask:\n        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],\n                            dtype=np.uint8)[None, :, :]\n        results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)\n    else:\n        raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]\n        results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)\n    results['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))\n    results['gt_seg_map'] = np.array(\n        [[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],\n        dtype=np.uint8)\n    return results\n\n\ndef check_result_same(results, pipeline_results, check_keys):\n    \"\"\"Check whether the ``pipeline_results`` is the same with the predefined\n    ``results``.\n\n    Args:\n        results (dict): Predefined results which should be the standard\n            output of the transform pipeline.\n        pipeline_results (dict): Results processed by the transform\n            pipeline.\n        check_keys (tuple): Keys that need to be checked between\n            results and pipeline_results.\n    \"\"\"\n    for key in check_keys:\n        if results.get(key, None) is None:\n            continue\n        if isinstance(results[key], (BitmapMasks, PolygonMasks)):\n            assert_allclose(pipeline_results[key].to_ndarray(),\n                            results[key].to_ndarray())\n        elif isinstance(results[key], BaseBoxes):\n            assert_allclose(pipeline_results[key].tensor, results[key].tensor)\n        else:\n            assert_allclose(pipeline_results[key], results[key])\n"
  },
  {
    "path": "tests/test_datasets/test_tta.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom unittest import TestCase\n\nimport mmcv\nimport pytest\n\nfrom mmdet.datasets.transforms import *  # noqa\nfrom mmdet.registry import TRANSFORMS\n\n\nclass TestMuitiScaleFlipAug(TestCase):\n\n    def test_exception(self):\n        with pytest.raises(TypeError):\n            tta_transform = dict(\n                type='TestTimeAug',\n                transforms=[dict(type='Resize', keep_ratio=False)],\n            )\n            TRANSFORMS.build(tta_transform)\n\n    def test_multi_scale_flip_aug(self):\n        tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[[\n                dict(type='Resize', scale=scale, keep_ratio=False)\n                for scale in [(256, 256), (512, 512), (1024, 1024)]\n            ],\n                        [\n                            dict(\n                                type='mmdet.PackDetInputs',\n                                meta_keys=('img_id', 'img_path', 'ori_shape',\n                                           'img_shape', 'scale_factor'))\n                        ]])\n        tta_module = TRANSFORMS.build(tta_transform)\n\n        results = dict()\n        img = mmcv.imread(\n            osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')\n        results['img_id'] = '1'\n        results['img_path'] = 'data/color.jpg'\n        results['img'] = img\n        results['ori_shape'] = img.shape\n        results['ori_height'] = img.shape[0]\n        results['ori_width'] = img.shape[1]\n        # Set initial values for default meta_keys\n        results['pad_shape'] = img.shape\n        results['scale_factor'] = 1.0\n\n        tta_results = tta_module(results.copy())\n        assert [img.shape\n                for img in tta_results['inputs']] == [(3, 256, 256),\n                                                      (3, 512, 512),\n                                                      (3, 1024, 1024)]\n\n        tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[\n                [\n                    dict(type='Resize', scale=scale, keep_ratio=False)\n                    for scale in [(256, 256), (512, 512), (1024, 1024)]\n                ],\n                [\n                    dict(type='RandomFlip', prob=0., direction='horizontal'),\n                    dict(type='RandomFlip', prob=1., direction='horizontal')\n                ],\n                [\n                    dict(\n                        type='mmdet.PackDetInputs',\n                        meta_keys=('img_id', 'img_path', 'ori_shape',\n                                   'img_shape', 'scale_factor', 'flip',\n                                   'flip_direction'))\n                ]\n            ])\n        tta_module = TRANSFORMS.build(tta_transform)\n        tta_results: dict = tta_module(results.copy())\n        assert [img.shape\n                for img in tta_results['inputs']] == [(3, 256, 256),\n                                                      (3, 256, 256),\n                                                      (3, 512, 512),\n                                                      (3, 512, 512),\n                                                      (3, 1024, 1024),\n                                                      (3, 1024, 1024)]\n        assert [\n            data_sample.metainfo['flip']\n            for data_sample in tta_results['data_samples']\n        ] == [False, True, False, True, False, True]\n\n        
tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[[\n                dict(type='Resize', scale=(512, 512), keep_ratio=False)\n            ],\n                        [\n                            dict(\n                                type='mmdet.PackDetInputs',\n                                meta_keys=('img_id', 'img_path', 'ori_shape',\n                                           'img_shape', 'scale_factor'))\n                        ]])\n        tta_module = TRANSFORMS.build(tta_transform)\n        tta_results = tta_module(results.copy())\n        assert [tta_results['inputs'][0].shape] == [(3, 512, 512)]\n\n        tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[\n                [dict(type='Resize', scale=(512, 512), keep_ratio=False)],\n                [\n                    dict(type='RandomFlip', prob=0., direction='horizontal'),\n                    dict(type='RandomFlip', prob=1., direction='horizontal')\n                ],\n                [\n                    dict(\n                        type='mmdet.PackDetInputs',\n                        meta_keys=('img_id', 'img_path', 'ori_shape',\n                                   'img_shape', 'scale_factor', 'flip',\n                                   'flip_direction'))\n                ]\n            ])\n        tta_module = TRANSFORMS.build(tta_transform)\n        tta_results = tta_module(results.copy())\n        assert [img.shape for img in tta_results['inputs']] == [(3, 512, 512),\n                                                                (3, 512, 512)]\n        assert [\n            data_sample.metainfo['flip']\n            for data_sample in tta_results['data_samples']\n        ] == [False, True]\n\n        tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[[\n                dict(type='Resize', scale_factor=r, keep_ratio=False)\n                for r in [0.5, 1.0, 2.0]\n            ],\n                        [\n                            dict(\n                                type='mmdet.PackDetInputs',\n                                meta_keys=('img_id', 'img_path', 'ori_shape',\n                                           'img_shape', 'scale_factor'))\n                        ]])\n        tta_module = TRANSFORMS.build(tta_transform)\n        tta_results = tta_module(results.copy())\n        assert [img.shape for img in tta_results['inputs']] == [(3, 144, 256),\n                                                                (3, 288, 512),\n                                                                (3, 576, 1024)]\n\n        tta_transform = dict(\n            type='TestTimeAug',\n            transforms=[\n                [\n                    dict(type='Resize', scale_factor=r, keep_ratio=True)\n                    for r in [0.5, 1.0, 2.0]\n                ],\n                [\n                    dict(type='RandomFlip', prob=0., direction='horizontal'),\n                    dict(type='RandomFlip', prob=1., direction='horizontal')\n                ],\n                [\n                    dict(\n                        type='mmdet.PackDetInputs',\n                        meta_keys=('img_id', 'img_path', 'ori_shape',\n                                   'img_shape', 'scale_factor', 'flip',\n                                   'flip_direction'))\n                ]\n            ])\n        tta_module = TRANSFORMS.build(tta_transform)\n        tta_results = tta_module(results.copy())\n        assert [img.shape for img in 
tta_results['inputs']] == [(3, 144, 256),\n                                                                (3, 144, 256),\n                                                                (3, 288, 512),\n                                                                (3, 288, 512),\n                                                                (3, 576, 1024),\n                                                                (3, 576, 1024)]\n        assert [\n            data_sample.metainfo['flip']\n            for data_sample in tta_results['data_samples']\n        ] == [False, True, False, True, False, True]\n"
  },
  {
    "path": "tests/test_engine/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_engine/test_hooks/test_checkloss_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nimport torch\n\nfrom mmdet.engine.hooks import CheckInvalidLossHook\n\n\nclass TestCheckInvalidLossHook(TestCase):\n\n    def test_after_train_iter(self):\n        n = 50\n        hook = CheckInvalidLossHook(n)\n        runner = Mock()\n        runner.logger = Mock()\n        runner.logger.info = Mock()\n\n        # Test `after_train_iter` function within the n iteration.\n        runner.iter = 10\n        outputs = dict(loss=torch.LongTensor([2]))\n        hook.after_train_iter(runner, 10, outputs=outputs)\n        outputs = dict(loss=torch.tensor(float('nan')))\n        hook.after_train_iter(runner, 10, outputs=outputs)\n        outputs = dict(loss=torch.tensor(float('inf')))\n        hook.after_train_iter(runner, 10, outputs=outputs)\n\n        # Test `after_train_iter` at the n iteration.\n        runner.iter = n - 1\n        outputs = dict(loss=torch.LongTensor([2]))\n        hook.after_train_iter(runner, n - 1, outputs=outputs)\n        outputs = dict(loss=torch.tensor(float('nan')))\n        with self.assertRaises(AssertionError):\n            hook.after_train_iter(runner, n - 1, outputs=outputs)\n        outputs = dict(loss=torch.tensor(float('inf')))\n        with self.assertRaises(AssertionError):\n            hook.after_train_iter(runner, n - 1, outputs=outputs)\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_mean_teacher_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport tempfile\nfrom unittest import TestCase\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.evaluator import BaseMetric\nfrom mmengine.model import BaseModel\nfrom mmengine.optim import OptimWrapper\nfrom mmengine.registry import MODEL_WRAPPERS\nfrom mmengine.runner import Runner\nfrom torch.utils.data import Dataset\n\nfrom mmdet.registry import DATASETS\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\nclass ToyModel(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.linear = nn.Linear(2, 1)\n\n    def forward(self, inputs, data_samples, mode='tensor'):\n        labels = torch.stack(data_samples)\n        inputs = torch.stack(inputs)\n        outputs = self.linear(inputs)\n        if mode == 'tensor':\n            return outputs\n        elif mode == 'loss':\n            loss = (labels - outputs).sum()\n            outputs = dict(loss=loss)\n            return outputs\n        else:\n            return outputs\n\n\nclass ToyModel1(BaseModel, ToyModel):\n\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, *args, **kwargs):\n        return super(BaseModel, self).forward(*args, **kwargs)\n\n\nclass ToyModel2(BaseModel):\n\n    def __init__(self):\n        super().__init__()\n        self.teacher = ToyModel1()\n        self.student = ToyModel1()\n\n    def forward(self, *args, **kwargs):\n        return self.student(*args, **kwargs)\n\n\n@DATASETS.register_module(force=True)\nclass DummyDataset(Dataset):\n    METAINFO = dict()  # type: ignore\n    data = torch.randn(12, 2)\n    label = torch.ones(12)\n\n    @property\n    def metainfo(self):\n        return self.METAINFO\n\n    def __len__(self):\n        return self.data.size(0)\n\n    def __getitem__(self, index):\n        return dict(inputs=self.data[index], data_samples=self.label[index])\n\n\nclass ToyMetric1(BaseMetric):\n\n    def __init__(self, collect_device='cpu', dummy_metrics=None):\n        super().__init__(collect_device=collect_device)\n        self.dummy_metrics = dummy_metrics\n\n    def process(self, data_batch, predictions):\n        result = {'acc': 1}\n        self.results.append(result)\n\n    def compute_metrics(self, results):\n        return dict(acc=1)\n\n\nclass TestMeanTeacherHook(TestCase):\n\n    def setUp(self):\n        self.temp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.temp_dir.cleanup()\n\n    def test_mean_teacher_hook(self):\n        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n        model = ToyModel2().to(device)\n        runner = Runner(\n            model=model,\n            train_dataloader=dict(\n                dataset=DummyDataset(),\n                sampler=dict(type='DefaultSampler', shuffle=True),\n                batch_size=3,\n                num_workers=0),\n            val_dataloader=dict(\n                dataset=DummyDataset(),\n                sampler=dict(type='DefaultSampler', shuffle=False),\n                batch_size=3,\n                num_workers=0),\n            val_evaluator=[ToyMetric1()],\n            work_dir=self.temp_dir.name,\n            default_scope='mmdet',\n            optim_wrapper=OptimWrapper(\n                torch.optim.Adam(ToyModel().parameters())),\n            train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),\n            val_cfg=dict(),\n            default_hooks=dict(logger=None),\n            
custom_hooks=[dict(type='MeanTeacherHook')],\n            experiment_name='test1')\n        runner.train()\n\n        self.assertTrue(\n            osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))\n        # checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))\n\n        # load and testing\n        runner = Runner(\n            model=model,\n            test_dataloader=dict(\n                dataset=DummyDataset(),\n                sampler=dict(type='DefaultSampler', shuffle=True),\n                batch_size=3,\n                num_workers=0),\n            test_evaluator=[ToyMetric1()],\n            test_cfg=dict(),\n            work_dir=self.temp_dir.name,\n            default_scope='mmdet',\n            load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),\n            default_hooks=dict(logger=None),\n            custom_hooks=[dict(type='MeanTeacherHook')],\n            experiment_name='test2')\n        runner.test()\n\n        @MODEL_WRAPPERS.register_module()\n        class DummyWrapper(BaseModel):\n\n            def __init__(self, model):\n                super().__init__()\n                self.module = model\n\n            def forward(self, *args, **kwargs):\n                return self.module(*args, **kwargs)\n\n        # with model wrapper\n        runner = Runner(\n            model=DummyWrapper(ToyModel2()),\n            test_dataloader=dict(\n                dataset=DummyDataset(),\n                sampler=dict(type='DefaultSampler', shuffle=True),\n                batch_size=3,\n                num_workers=0),\n            test_evaluator=[ToyMetric1()],\n            test_cfg=dict(),\n            work_dir=self.temp_dir.name,\n            default_scope='mmdet',\n            load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),\n            default_hooks=dict(logger=None),\n            custom_hooks=[dict(type='MeanTeacherHook')],\n            experiment_name='test3')\n        runner.test()\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_memory_profiler_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nfrom mmdet.engine.hooks import MemoryProfilerHook\n\n\nclass TestMemoryProfilerHook(TestCase):\n\n    def test_after_train_iter(self):\n        hook = MemoryProfilerHook(2)\n        runner = Mock()\n        runner.logger = Mock()\n        runner.logger.info = Mock()\n        hook.after_train_iter(runner, 0)\n        runner.logger.info.assert_not_called()\n        hook.after_train_iter(runner, 1)\n        runner.logger.info.assert_called_once()\n\n    def test_after_val_iter(self):\n        hook = MemoryProfilerHook(2)\n        runner = Mock()\n        runner.logger = Mock()\n        runner.logger.info = Mock()\n        hook.after_val_iter(runner, 0)\n        runner.logger.info.assert_not_called()\n        hook.after_val_iter(runner, 1)\n        runner.logger.info.assert_called_once()\n\n    def test_after_test_iter(self):\n        hook = MemoryProfilerHook(2)\n        runner = Mock()\n        runner.logger = Mock()\n        runner.logger.info = Mock()\n        hook.after_test_iter(runner, 0)\n        runner.logger.info.assert_not_called()\n        hook.after_test_iter(runner, 1)\n        runner.logger.info.assert_called_once()\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_num_class_check_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom copy import deepcopy\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nfrom mmcv.cnn import VGG\nfrom mmengine.dataset import BaseDataset\nfrom torch import nn\n\nfrom mmdet.engine.hooks import NumClassCheckHook\nfrom mmdet.models.roi_heads.mask_heads import FusedSemanticHead\n\n\nclass TestNumClassCheckHook(TestCase):\n\n    def setUp(self):\n        # Setup NumClassCheckHook\n        hook = NumClassCheckHook()\n        self.hook = hook\n\n        # Setup runner mock\n        runner = Mock()\n        runner.model = Mock()\n        runner.logger = Mock()\n        runner.logger.warning = Mock()\n        runner.train_dataloader = Mock()\n        runner.val_dataloader = Mock()\n        self.runner = runner\n\n        # Setup dataset\n        metainfo = dict(classes=None)\n        self.none_classmeta_dataset = BaseDataset(\n            metainfo=metainfo, lazy_init=True)\n        metainfo = dict(classes='class_name')\n        self.str_classmeta_dataset = BaseDataset(\n            metainfo=metainfo, lazy_init=True)\n        metainfo = dict(classes=('bus', 'car'))\n        self.normal_classmeta_dataset = BaseDataset(\n            metainfo=metainfo, lazy_init=True)\n\n        # Setup valid model\n        valid_model = nn.Module()\n        valid_model.add_module('backbone', VGG(depth=11))\n        fused_semantic_head = FusedSemanticHead(\n            num_ins=1,\n            fusion_level=0,\n            num_convs=1,\n            in_channels=1,\n            conv_out_channels=1)\n        valid_model.add_module('semantic_head', fused_semantic_head)\n        rpn_head = nn.Module()\n        rpn_head.num_classes = 1\n        valid_model.add_module('rpn_head', rpn_head)\n        bbox_head = nn.Module()\n        bbox_head.num_classes = 2\n        valid_model.add_module('bbox_head', bbox_head)\n        self.valid_model = valid_model\n\n        # Setup invalid model\n        invalid_model = nn.Module()\n        bbox_head = nn.Module()\n        bbox_head.num_classes = 4\n        invalid_model.add_module('bbox_head', bbox_head)\n        self.invalid_model = invalid_model\n\n    def test_before_train_epch(self):\n        runner = deepcopy(self.runner)\n\n        # Test when dataset.metainfo['classes'] is None\n        runner.train_dataloader.dataset = self.none_classmeta_dataset\n        self.hook.before_train_epoch(runner)\n        runner.logger.warning.assert_called_once()\n        # Test when dataset.metainfo['classes'] is a str\n        runner.train_dataloader.dataset = self.str_classmeta_dataset\n        with self.assertRaises(AssertionError):\n            self.hook.before_train_epoch(runner)\n\n        runner.train_dataloader.dataset = self.normal_classmeta_dataset\n        # Test `num_classes` of model is compatible with dataset\n        runner.model = self.valid_model\n        self.hook.before_train_epoch(runner)\n        # Test `num_classes` of model is not compatible with dataset\n        runner.model = self.invalid_model\n        with self.assertRaises(AssertionError):\n            self.hook.before_train_epoch(runner)\n\n    def test_before_val_epoch(self):\n        runner = deepcopy(self.runner)\n\n        # Test when dataset.metainfo['classes'] is None\n        runner.val_dataloader.dataset = self.none_classmeta_dataset\n        self.hook.before_val_epoch(runner)\n        runner.logger.warning.assert_called_once()\n        # Test when dataset.metainfo['classes'] is a str\n        runner.val_dataloader.dataset = 
self.str_classmeta_dataset\n        with self.assertRaises(AssertionError):\n            self.hook.before_val_epoch(runner)\n\n        runner.val_dataloader.dataset = self.normal_classmeta_dataset\n        # Test `num_classes` of model is compatible with dataset\n        runner.model = self.valid_model\n        self.hook.before_val_epoch(runner)\n        # Test `num_classes` of model is not compatible with dataset\n        runner.model = self.invalid_model\n        with self.assertRaises(AssertionError):\n            self.hook.before_val_epoch(runner)\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_sync_norm_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nimport torch.nn as nn\n\nfrom mmdet.engine.hooks import SyncNormHook\n\n\nclass TestSyncNormHook(TestCase):\n\n    @patch(\n        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 1))\n    def test_before_val_epoch_non_dist(self, mock):\n        model = nn.Sequential(\n            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),\n            nn.Linear(5, 10))\n        runner = Mock()\n        runner.model = model\n        hook = SyncNormHook()\n        hook.before_val_epoch(runner)\n\n    @patch(\n        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))\n    def test_before_val_epoch_dist(self, mock):\n        model = nn.Sequential(\n            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),\n            nn.Linear(5, 10))\n        runner = Mock()\n        runner.model = model\n        hook = SyncNormHook()\n        hook.before_val_epoch(runner)\n\n    @patch(\n        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))\n    def test_before_val_epoch_dist_no_norm(self, mock):\n        model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))\n        runner = Mock()\n        runner.model = model\n        hook = SyncNormHook()\n        hook.before_val_epoch(runner)\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_visualization_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport shutil\nimport time\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.engine.hooks import DetVisualizationHook\nfrom mmdet.structures import DetDataSample\nfrom mmdet.visualization import DetLocalVisualizer\n\n\ndef _rand_bboxes(num_boxes, h, w):\n    cx, cy, bw, bh = torch.rand(num_boxes, 4).T\n\n    tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)\n    tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)\n    br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)\n    br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)\n\n    bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T\n    return bboxes\n\n\nclass TestVisualizationHook(TestCase):\n\n    def setUp(self) -> None:\n        DetLocalVisualizer.get_instance('current_visualizer')\n\n        pred_instances = InstanceData()\n        pred_instances.bboxes = _rand_bboxes(5, 10, 12)\n        pred_instances.labels = torch.randint(0, 2, (5, ))\n        pred_instances.scores = torch.rand((5, ))\n        pred_det_data_sample = DetDataSample()\n        pred_det_data_sample.set_metainfo({\n            'img_path':\n            osp.join(osp.dirname(__file__), '../../data/color.jpg')\n        })\n        pred_det_data_sample.pred_instances = pred_instances\n        self.outputs = [pred_det_data_sample] * 2\n\n    def test_after_val_iter(self):\n        runner = Mock()\n        runner.iter = 1\n        hook = DetVisualizationHook()\n        hook.after_val_iter(runner, 1, {}, self.outputs)\n\n    def test_after_test_iter(self):\n        runner = Mock()\n        runner.iter = 1\n        hook = DetVisualizationHook(draw=True)\n        hook.after_test_iter(runner, 1, {}, self.outputs)\n        self.assertEqual(hook._test_index, 2)\n\n        # test\n        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n        test_out_dir = timestamp + '1'\n        runner.work_dir = timestamp\n        runner.timestamp = '1'\n        hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)\n        hook.after_test_iter(runner, 1, {}, self.outputs)\n        self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))\n\n        hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)\n        hook.after_test_iter(runner, 1, {}, self.outputs)\n        self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))\n        shutil.rmtree(f'{timestamp}')\n"
  },
  {
    "path": "tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom mmdet.engine.hooks import YOLOXModeSwitchHook\n\n\nclass TestYOLOXModeSwitchHook(TestCase):\n\n    @patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')\n    def test_is_model_wrapper_and_persistent_workers_on(\n            self, mock_is_model_wrapper):\n        mock_is_model_wrapper.return_value = True\n        runner = Mock()\n        runner.model = Mock()\n        runner.model.module = Mock()\n        runner.model.module.bbox_head.use_l1 = False\n        runner.train_dataloader = Mock()\n        runner.train_dataloader.persistent_workers = True\n        runner.train_dataloader._DataLoader__initialized = True\n        runner.epoch = 284\n        runner.max_epochs = 300\n\n        hook = YOLOXModeSwitchHook(num_last_epochs=15)\n        hook.before_train_epoch(runner)\n        self.assertTrue(hook._restart_dataloader)\n        self.assertTrue(runner.model.module.bbox_head.use_l1)\n        self.assertFalse(runner.train_dataloader._DataLoader__initialized)\n\n        runner.epoch = 285\n        hook.before_train_epoch(runner)\n        self.assertTrue(runner.train_dataloader._DataLoader__initialized)\n\n    def test_not_model_wrapper_and_persistent_workers_off(self):\n        runner = Mock()\n        runner.model = Mock()\n        runner.model.bbox_head.use_l1 = False\n        runner.train_dataloader = Mock()\n        runner.train_dataloader.persistent_workers = False\n        runner.train_dataloader._DataLoader__initialized = True\n        runner.epoch = 284\n        runner.max_epochs = 300\n\n        hook = YOLOXModeSwitchHook(num_last_epochs=15)\n        hook.before_train_epoch(runner)\n        self.assertFalse(hook._restart_dataloader)\n        self.assertTrue(runner.model.bbox_head.use_l1)\n        self.assertTrue(runner.train_dataloader._DataLoader__initialized)\n\n        runner.epoch = 285\n        hook.before_train_epoch(runner)\n        self.assertFalse(hook._restart_dataloader)\n        self.assertTrue(runner.train_dataloader._DataLoader__initialized)\n"
  },
  {
    "path": "tests/test_engine/test_optimizers/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_engine/test_optimizers/test_layer_decay_optimizer_constructor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.engine import LearningRateDecayOptimizerConstructor\n\nbase_lr = 1\ndecay_rate = 2\nbase_wd = 0.05\nweight_decay = 0.05\n\nexpected_stage_wise_lr_wd_convnext = [{\n    'weight_decay': 0.0,\n    'lr_scale': 128\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 1\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 64\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 64\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 32\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 32\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 16\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 16\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 8\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 8\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 128\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 1\n}]\n\nexpected_layer_wise_lr_wd_convnext = [{\n    'weight_decay': 0.0,\n    'lr_scale': 128\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 1\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 64\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 64\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 32\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 32\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 16\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 16\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 2\n}, {\n    'weight_decay': 0.0,\n    'lr_scale': 2\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 128\n}, {\n    'weight_decay': 0.05,\n    'lr_scale': 1\n}]\n\n\nclass ToyConvNeXt(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.stages = nn.ModuleList()\n        for i in range(4):\n            stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))\n            self.stages.append(stage)\n        self.norm0 = nn.BatchNorm2d(2)\n\n        # add some variables to meet unit test coverate rate\n        self.cls_token = nn.Parameter(torch.ones(1))\n        self.mask_token = nn.Parameter(torch.ones(1))\n        self.pos_embed = nn.Parameter(torch.ones(1))\n        self.stem_norm = nn.Parameter(torch.ones(1))\n        self.downsample_norm0 = nn.BatchNorm2d(2)\n        self.downsample_norm1 = nn.BatchNorm2d(2)\n        self.downsample_norm2 = nn.BatchNorm2d(2)\n        self.lin = nn.Parameter(torch.ones(1))\n        self.lin.requires_grad = False\n        self.downsample_layers = nn.ModuleList()\n        for _ in range(4):\n            stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))\n            self.downsample_layers.append(stage)\n\n\nclass ToyDetector(nn.Module):\n\n    def __init__(self, backbone):\n        super().__init__()\n        self.backbone = backbone\n        self.head = nn.Conv2d(2, 2, kernel_size=1, groups=2)\n\n\nclass PseudoDataParallel(nn.Module):\n\n    def __init__(self, model):\n        super().__init__()\n        self.module = model\n\n\ndef check_optimizer_lr_wd(optimizer, gt_lr_wd):\n    assert isinstance(optimizer, torch.optim.AdamW)\n    assert optimizer.defaults['lr'] == base_lr\n    assert optimizer.defaults['weight_decay'] == base_wd\n    param_groups = optimizer.param_groups\n    print(param_groups)\n    assert len(param_groups) == len(gt_lr_wd)\n    for i, param_dict in enumerate(param_groups):\n        assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay']\n        assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale']\n        assert param_dict['lr_scale'] == 
param_dict['lr']\n\n\ndef test_learning_rate_decay_optimizer_constructor():\n\n    # Test lr wd for ConvNeXT\n    backbone = ToyConvNeXt()\n    model = PseudoDataParallel(ToyDetector(backbone))\n    optim_wrapper_cfg = dict(\n        type='OptimWrapper',\n        optimizer=dict(\n            type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05))\n    # stagewise decay\n    stagewise_paramwise_cfg = dict(\n        decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)\n    optim_constructor = LearningRateDecayOptimizerConstructor(\n        optim_wrapper_cfg, stagewise_paramwise_cfg)\n    optim_wrapper = optim_constructor(model)\n    check_optimizer_lr_wd(optim_wrapper.optimizer,\n                          expected_stage_wise_lr_wd_convnext)\n    # layerwise decay\n    layerwise_paramwise_cfg = dict(\n        decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)\n    optim_constructor = LearningRateDecayOptimizerConstructor(\n        optim_wrapper_cfg, layerwise_paramwise_cfg)\n    optim_wrapper = optim_constructor(model)\n    check_optimizer_lr_wd(optim_wrapper.optimizer,\n                          expected_layer_wise_lr_wd_convnext)\n"
  },
  {
    "path": "tests/test_engine/test_runner/test_loops.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport tempfile\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.evaluator import Evaluator\nfrom mmengine.model import BaseModel\nfrom mmengine.optim import OptimWrapper\nfrom mmengine.runner import Runner\nfrom torch.utils.data import Dataset\n\nfrom mmdet.registry import DATASETS\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\nclass ToyModel(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.linear = nn.Linear(2, 1)\n\n    def forward(self, inputs, data_samples, mode='tensor'):\n        labels = torch.stack(data_samples)\n        inputs = torch.stack(inputs)\n        outputs = self.linear(inputs)\n        if mode == 'tensor':\n            return outputs\n        elif mode == 'loss':\n            loss = (labels - outputs).sum()\n            outputs = dict(loss=loss)\n            return outputs\n        else:\n            return outputs\n\n\nclass ToyModel1(BaseModel, ToyModel):\n\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, *args, **kwargs):\n        return super(BaseModel, self).forward(*args, **kwargs)\n\n\nclass ToyModel2(BaseModel):\n\n    def __init__(self):\n        super().__init__()\n        self.teacher = ToyModel1()\n        self.student = ToyModel1()\n        self.semi_test_cfg = dict(predict_on='teacher')\n\n    def forward(self, *args, **kwargs):\n        return self.student(*args, **kwargs)\n\n\n@DATASETS.register_module(force=True)\nclass DummyDataset(Dataset):\n    METAINFO = dict()  # type: ignore\n    data = torch.randn(12, 2)\n    label = torch.ones(12)\n\n    @property\n    def metainfo(self):\n        return self.METAINFO\n\n    def __len__(self):\n        return self.data.size(0)\n\n    def __getitem__(self, index):\n        return dict(inputs=self.data[index], data_samples=self.label[index])\n\n\nclass TestTeacherStudentValLoop(TestCase):\n\n    def setUp(self):\n        self.temp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.temp_dir.cleanup()\n\n    def test_teacher_student_val_loop(self):\n        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n        model = ToyModel2().to(device)\n        evaluator = Mock()\n        evaluator.evaluate = Mock(return_value=dict(acc=0.5))\n        evaluator.__class__ = Evaluator\n        runner = Runner(\n            model=model,\n            train_dataloader=dict(\n                dataset=dict(type='DummyDataset'),\n                sampler=dict(type='DefaultSampler', shuffle=True),\n                batch_size=3,\n                num_workers=0),\n            val_dataloader=dict(\n                dataset=dict(type='DummyDataset'),\n                sampler=dict(type='DefaultSampler', shuffle=False),\n                batch_size=3,\n                num_workers=0),\n            val_evaluator=evaluator,\n            work_dir=self.temp_dir.name,\n            default_scope='mmdet',\n            optim_wrapper=OptimWrapper(\n                torch.optim.Adam(ToyModel().parameters())),\n            train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),\n            val_cfg=dict(type='TeacherStudentValLoop'),\n            default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),\n            experiment_name='test1')\n        runner.train()\n"
  },
  {
    "path": "tests/test_engine/test_schedulers/test_quadratic_warmup.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom mmengine.optim.scheduler import _ParamScheduler\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.engine.schedulers import (QuadraticWarmupLR,\n                                     QuadraticWarmupMomentum,\n                                     QuadraticWarmupParamScheduler)\n\n\nclass ToyModel(torch.nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.conv1 = torch.nn.Conv2d(1, 1, 1)\n        self.conv2 = torch.nn.Conv2d(1, 1, 1)\n\n    def forward(self, x):\n        return self.conv2(F.relu(self.conv1(x)))\n\n\nclass TestQuadraticWarmupScheduler(TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.model = ToyModel()\n        self.optimizer = optim.SGD(\n            self.model.parameters(), lr=0.05, momentum=0.01, weight_decay=5e-4)\n\n    def _test_scheduler_value(self,\n                              schedulers,\n                              targets,\n                              epochs=10,\n                              param_name='lr'):\n        if isinstance(schedulers, _ParamScheduler):\n            schedulers = [schedulers]\n        for epoch in range(epochs):\n            for param_group, target in zip(self.optimizer.param_groups,\n                                           targets):\n                print(param_group[param_name])\n                assert_allclose(\n                    target[epoch],\n                    param_group[param_name],\n                    msg='{} is wrong in epoch {}: expected {}, got {}'.format(\n                        param_name, epoch, target[epoch],\n                        param_group[param_name]),\n                    atol=1e-5,\n                    rtol=0)\n            [scheduler.step() for scheduler in schedulers]\n\n    def test_quadratic_warmup_scheduler(self):\n        with self.assertRaises(ValueError):\n            QuadraticWarmupParamScheduler(self.optimizer, param_name='lr')\n        epochs = 10\n        iters = 5\n        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]\n        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (\n            epochs - iters)\n        targets = [single_targets, [x * epochs for x in single_targets]]\n        scheduler = QuadraticWarmupParamScheduler(\n            self.optimizer, param_name='lr', end=iters)\n        self._test_scheduler_value(scheduler, targets, epochs)\n\n    def test_quadratic_warmup_scheduler_convert_iterbased(self):\n        epochs = 10\n        end = 5\n        epoch_length = 11\n\n        iters = end * epoch_length\n        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]\n        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (\n            epochs * epoch_length - iters)\n        targets = [single_targets, [x * epochs for x in single_targets]]\n        scheduler = QuadraticWarmupParamScheduler.build_iter_from_epoch(\n            self.optimizer,\n            param_name='lr',\n            end=end,\n            epoch_length=epoch_length)\n        self._test_scheduler_value(scheduler, targets, epochs * epoch_length)\n\n    def test_quadratic_warmup_lr(self):\n        epochs = 10\n        iters = 5\n      
  warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]\n        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (\n            epochs - iters)\n        targets = [single_targets, [x * epochs for x in single_targets]]\n        scheduler = QuadraticWarmupLR(self.optimizer, end=iters)\n        self._test_scheduler_value(scheduler, targets, epochs)\n\n    def test_quadratic_warmup_momentum(self):\n        epochs = 10\n        iters = 5\n        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]\n        single_targets = [x * 0.01 for x in warmup_factor] + [0.01] * (\n            epochs - iters)\n        targets = [single_targets, [x * epochs for x in single_targets]]\n        scheduler = QuadraticWarmupMomentum(self.optimizer, end=iters)\n        self._test_scheduler_value(\n            scheduler, targets, epochs, param_name='momentum')\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_cityscapes_metric.py",
    "content": "import os\nimport os.path as osp\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nfrom mmdet.evaluation import CityScapesMetric\n\ntry:\n    import cityscapesscripts\nexcept ImportError:\n    cityscapesscripts = None\n\n\nclass TestCityScapesMetric(unittest.TestCase):\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    @unittest.skipIf(cityscapesscripts is None,\n                     'cityscapesscripts is not installed.')\n    def test_init(self):\n        # test with outfile_prefix = None\n        with self.assertRaises(AssertionError):\n            CityScapesMetric(outfile_prefix=None)\n\n        # test with format_only=True, keep_results=False\n        with self.assertRaises(AssertionError):\n            CityScapesMetric(\n                outfile_prefix=self.tmp_dir.name + 'test',\n                format_only=True,\n                keep_results=False)\n\n    @unittest.skipIf(cityscapesscripts is None,\n                     'cityscapesscripts is not installed.')\n    def test_evaluate(self):\n        dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)\n        dummy_mask1[:, :10, :10] = 1\n        dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)\n        dummy_mask2[:, :10, :10] = 1\n\n        self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')\n        self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')\n        city = 'lindau'\n        sequenceNb = '000000'\n        frameNb = '000019'\n        img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'\n        img_path1 = osp.join(self.seg_prefix, city, img_name1)\n\n        frameNb = '000020'\n        img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'\n        img_path2 = osp.join(self.seg_prefix, city, img_name2)\n        os.makedirs(osp.join(self.seg_prefix, city))\n\n        masks1 = np.zeros((20, 20), dtype=np.int32)\n        masks1[:10, :10] = 24 * 1000\n        Image.fromarray(masks1).save(img_path1)\n\n        masks2 = np.zeros((20, 20), dtype=np.int32)\n        masks2[:10, :10] = 24 * 1000 + 1\n        Image.fromarray(masks2).save(img_path2)\n\n        data_samples = [{\n            'img_path': img_path1,\n            'pred_instances': {\n                'scores': torch.from_numpy(np.array([1.0])),\n                'labels': torch.from_numpy(np.array([0])),\n                'masks': torch.from_numpy(dummy_mask1)\n            }\n        }, {\n            'img_path': img_path2,\n            'pred_instances': {\n                'scores': torch.from_numpy(np.array([0.98])),\n                'labels': torch.from_numpy(np.array([1])),\n                'masks': torch.from_numpy(dummy_mask2)\n            }\n        }]\n\n        target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}\n        metric = CityScapesMetric(\n            seg_prefix=self.seg_prefix,\n            format_only=False,\n            keep_results=False,\n            outfile_prefix=self.outfile_prefix)\n        metric.dataset_meta = dict(\n            classes=('person', 'rider', 'car', 'truck', 'bus', 'train',\n                     'motorcycle', 'bicycle'))\n        metric.process({}, data_samples)\n        results = metric.evaluate(size=2)\n        self.assertDictEqual(results, target)\n        del metric\n        self.assertTrue(not osp.exists('{self.outfile_prefix}.results'))\n\n        # test format_only\n        metric = CityScapesMetric(\n            
seg_prefix=self.seg_prefix,\n            format_only=True,\n            keep_results=True,\n            outfile_prefix=self.outfile_prefix)\n        metric.dataset_meta = dict(\n            classes=('person', 'rider', 'car', 'truck', 'bus', 'train',\n                     'motorcycle', 'bicycle'))\n        metric.process({}, data_samples)\n        results = metric.evaluate(size=2)\n        self.assertDictEqual(results, dict())\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_coco_metric.py",
    "content": "import os.path as osp\nimport tempfile\nfrom unittest import TestCase\n\nimport numpy as np\nimport pycocotools.mask as mask_util\nimport torch\nfrom mmengine.fileio import dump\n\nfrom mmdet.evaluation import CocoMetric\n\n\nclass TestCocoMetric(TestCase):\n\n    def _create_dummy_coco_json(self, json_name):\n        dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)\n        dummy_mask[:5, :5] = 1\n        rle_mask = mask_util.encode(dummy_mask)\n        rle_mask['counts'] = rle_mask['counts'].decode('utf-8')\n        image = {\n            'id': 0,\n            'width': 640,\n            'height': 640,\n            'file_name': 'fake_name.jpg',\n        }\n\n        annotation_1 = {\n            'id': 1,\n            'image_id': 0,\n            'category_id': 0,\n            'area': 400,\n            'bbox': [50, 60, 20, 20],\n            'iscrowd': 0,\n            'segmentation': rle_mask,\n        }\n\n        annotation_2 = {\n            'id': 2,\n            'image_id': 0,\n            'category_id': 0,\n            'area': 900,\n            'bbox': [100, 120, 30, 30],\n            'iscrowd': 0,\n            'segmentation': rle_mask,\n        }\n\n        annotation_3 = {\n            'id': 3,\n            'image_id': 0,\n            'category_id': 1,\n            'area': 1600,\n            'bbox': [150, 160, 40, 40],\n            'iscrowd': 0,\n            'segmentation': rle_mask,\n        }\n\n        annotation_4 = {\n            'id': 4,\n            'image_id': 0,\n            'category_id': 0,\n            'area': 10000,\n            'bbox': [250, 260, 100, 100],\n            'iscrowd': 0,\n            'segmentation': rle_mask,\n        }\n\n        categories = [\n            {\n                'id': 0,\n                'name': 'car',\n                'supercategory': 'car',\n            },\n            {\n                'id': 1,\n                'name': 'bicycle',\n                'supercategory': 'bicycle',\n            },\n        ]\n\n        fake_json = {\n            'images': [image],\n            'annotations':\n            [annotation_1, annotation_2, annotation_3, annotation_4],\n            'categories': categories\n        }\n\n        dump(fake_json, json_name)\n\n    def _create_dummy_results(self):\n        bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],\n                           [150, 160, 190, 200], [250, 260, 350, 360]])\n        scores = np.array([1.0, 0.98, 0.96, 0.95])\n        labels = np.array([0, 0, 1, 0])\n        dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)\n        dummy_mask[:, :5, :5] = 1\n        return dict(\n            bboxes=torch.from_numpy(bboxes),\n            scores=torch.from_numpy(scores),\n            labels=torch.from_numpy(labels),\n            masks=torch.from_numpy(dummy_mask))\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    def test_init(self):\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        with self.assertRaisesRegex(KeyError, 'metric should be one of'):\n            CocoMetric(ann_file=fake_json_file, metric='unknown')\n\n    def test_evaluate(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test single coco dataset evaluation\n  
      coco_metric = CocoMetric(\n            ann_file=fake_json_file,\n            classwise=False,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {\n            'coco/bbox_mAP': 1.0,\n            'coco/bbox_mAP_50': 1.0,\n            'coco/bbox_mAP_75': 1.0,\n            'coco/bbox_mAP_s': 1.0,\n            'coco/bbox_mAP_m': 1.0,\n            'coco/bbox_mAP_l': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))\n\n        # test box and segm coco dataset evaluation\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file,\n            metric=['bbox', 'segm'],\n            classwise=False,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {\n            'coco/bbox_mAP': 1.0,\n            'coco/bbox_mAP_50': 1.0,\n            'coco/bbox_mAP_75': 1.0,\n            'coco/bbox_mAP_s': 1.0,\n            'coco/bbox_mAP_m': 1.0,\n            'coco/bbox_mAP_l': 1.0,\n            'coco/segm_mAP': 1.0,\n            'coco/segm_mAP_50': 1.0,\n            'coco/segm_mAP_75': 1.0,\n            'coco/segm_mAP_s': 1.0,\n            'coco/segm_mAP_m': 1.0,\n            'coco/segm_mAP_l': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))\n\n        # test invalid custom metric_items\n        with self.assertRaisesRegex(KeyError,\n                                    'metric item \"invalid\" is not supported'):\n            coco_metric = CocoMetric(\n                ann_file=fake_json_file, metric_items=['invalid'])\n            coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n            coco_metric.process({}, [\n                dict(\n                    pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))\n            ])\n            coco_metric.evaluate(size=1)\n\n        # test custom metric_items\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file, metric_items=['mAP_m'])\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {\n            'coco/bbox_mAP_m': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n\n    def test_classwise_evaluate(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test single coco dataset evaluation\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file, metric='bbox', classwise=True)\n        coco_metric.dataset_meta = dict(classes=['car', 
'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {\n            'coco/bbox_mAP': 1.0,\n            'coco/bbox_mAP_50': 1.0,\n            'coco/bbox_mAP_75': 1.0,\n            'coco/bbox_mAP_s': 1.0,\n            'coco/bbox_mAP_m': 1.0,\n            'coco/bbox_mAP_l': 1.0,\n            'coco/car_precision': 1.0,\n            'coco/bicycle_precision': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n\n    def test_manually_set_iou_thrs(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n\n        # test single coco dataset evaluation\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        self.assertEqual(coco_metric.iou_thrs, [0.3, 0.6])\n\n    def test_fast_eval_recall(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test default proposal nums\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file, metric='proposal_fast')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {'coco/AR@100': 1.0, 'coco/AR@300': 1.0, 'coco/AR@1000': 1.0}\n        self.assertDictEqual(eval_results, target)\n\n        # test manually set proposal nums\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file,\n            metric='proposal_fast',\n            proposal_nums=(2, 4))\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        target = {'coco/AR@2': 0.5, 'coco/AR@4': 1.0}\n        self.assertDictEqual(eval_results, target)\n\n    def test_evaluate_proposal(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        coco_metric = CocoMetric(ann_file=fake_json_file, metric='proposal')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        print(eval_results)\n        target = {\n            'coco/AR@100': 1,\n            'coco/AR@300': 1.0,\n            'coco/AR@1000': 1.0,\n            'coco/AR_s@1000': 1.0,\n            'coco/AR_m@1000': 1.0,\n            'coco/AR_l@1000': 1.0\n        }\n        self.assertDictEqual(eval_results, target)\n\n    def test_empty_results(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        coco_metric = CocoMetric(ann_file=fake_json_file, metric='bbox')\n        
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        bboxes = np.zeros((0, 4))\n        labels = np.array([])\n        scores = np.array([])\n        dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)\n        empty_pred = dict(\n            bboxes=torch.from_numpy(bboxes),\n            scores=torch.from_numpy(scores),\n            labels=torch.from_numpy(labels),\n            masks=torch.from_numpy(dummy_mask))\n        coco_metric.process(\n            {},\n            [dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])\n        # coco api Index error will be caught\n        coco_metric.evaluate(size=1)\n\n    def test_evaluate_without_json(self):\n        dummy_pred = self._create_dummy_results()\n\n        dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)\n        dummy_mask[:5, :5] = 1\n        rle_mask = mask_util.encode(dummy_mask)\n        rle_mask['counts'] = rle_mask['counts'].decode('utf-8')\n        instances = [{\n            'bbox_label': 0,\n            'bbox': [50, 60, 70, 80],\n            'ignore_flag': 0,\n            'mask': rle_mask,\n        }, {\n            'bbox_label': 0,\n            'bbox': [100, 120, 130, 150],\n            'ignore_flag': 0,\n            'mask': rle_mask,\n        }, {\n            'bbox_label': 1,\n            'bbox': [150, 160, 190, 200],\n            'ignore_flag': 0,\n            'mask': rle_mask,\n        }, {\n            'bbox_label': 0,\n            'bbox': [250, 260, 350, 360],\n            'ignore_flag': 0,\n            'mask': rle_mask,\n        }]\n        coco_metric = CocoMetric(\n            ann_file=None,\n            metric=['bbox', 'segm'],\n            classwise=False,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process({}, [\n            dict(\n                pred_instances=dummy_pred,\n                img_id=0,\n                ori_shape=(640, 640),\n                instances=instances)\n        ])\n        eval_results = coco_metric.evaluate(size=1)\n        print(eval_results)\n        target = {\n            'coco/bbox_mAP': 1.0,\n            'coco/bbox_mAP_50': 1.0,\n            'coco/bbox_mAP_75': 1.0,\n            'coco/bbox_mAP_s': 1.0,\n            'coco/bbox_mAP_m': 1.0,\n            'coco/bbox_mAP_l': 1.0,\n            'coco/segm_mAP': 1.0,\n            'coco/segm_mAP_50': 1.0,\n            'coco/segm_mAP_75': 1.0,\n            'coco/segm_mAP_s': 1.0,\n            'coco/segm_mAP_m': 1.0,\n            'coco/segm_mAP_l': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.gt.json')))\n\n    def test_format_only(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_coco_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        with self.assertRaises(AssertionError):\n            CocoMetric(\n                ann_file=fake_json_file,\n                classwise=False,\n                format_only=True,\n                outfile_prefix=None)\n\n        coco_metric = CocoMetric(\n            ann_file=fake_json_file,\n            metric='bbox',\n            classwise=False,\n            
format_only=True,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])\n        coco_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = coco_metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, dict())\n        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_coco_occluded_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom tempfile import TemporaryDirectory\n\nimport mmengine\nimport numpy as np\n\nfrom mmdet.datasets import CocoDataset\nfrom mmdet.evaluation import CocoOccludedSeparatedMetric\n\n\ndef test_coco_occluded_separated_metric():\n    ann = [[\n        'fake1.jpg', 'person', 8, [219.9, 176.12, 11.14, 34.23], {\n            'size': [480, 640],\n            'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'\n        }\n    ]] * 3\n    dummy_mask = np.zeros((10, 10), dtype=np.uint8)\n    dummy_mask[:5, :5] = 1\n    rle = {\n        'size': [480, 640],\n        'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'\n    }\n    res = [(None,\n            dict(\n                img_id=0,\n                bboxes=np.array([[50, 60, 70, 80]] * 2),\n                masks=[rle] * 2,\n                labels=np.array([0, 1], dtype=np.int64),\n                scores=np.array([0.77, 0.77])))] * 3\n\n    tempdir = TemporaryDirectory()\n    ann_path = osp.join(tempdir.name, 'coco_occluded.pkl')\n    mmengine.dump(ann, ann_path)\n\n    metric = CocoOccludedSeparatedMetric(\n        ann_file='tests/data/coco_sample.json',\n        occluded_ann=ann_path,\n        separated_ann=ann_path,\n        metric=[])\n    metric.dataset_meta = CocoDataset.METAINFO\n    eval_res = metric.compute_metrics(res)\n    assert isinstance(eval_res, dict)\n    assert eval_res['occluded_recall'] == 100\n    assert eval_res['separated_recall'] == 100\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_coco_panoptic_metric.py",
    "content": "import os\nimport os.path as osp\nimport tempfile\nimport unittest\nfrom copy import deepcopy\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmengine.fileio import dump\n\nfrom mmdet.evaluation import INSTANCE_OFFSET, CocoPanopticMetric\n\ntry:\n    import panopticapi\nexcept ImportError:\n    panopticapi = None\n\n\nclass TestCocoPanopticMetric(unittest.TestCase):\n\n    def _create_panoptic_gt_annotations(self, ann_file, seg_map_dir):\n        categories = [{\n            'id': 0,\n            'name': 'person',\n            'supercategory': 'person',\n            'isthing': 1\n        }, {\n            'id': 1,\n            'name': 'cat',\n            'supercategory': 'cat',\n            'isthing': 1\n        }, {\n            'id': 2,\n            'name': 'dog',\n            'supercategory': 'dog',\n            'isthing': 1\n        }, {\n            'id': 3,\n            'name': 'wall',\n            'supercategory': 'wall',\n            'isthing': 0\n        }]\n\n        images = [{\n            'id': 0,\n            'width': 80,\n            'height': 60,\n            'file_name': 'fake_name1.jpg',\n        }]\n\n        annotations = [{\n            'segments_info': [{\n                'id': 1,\n                'category_id': 0,\n                'area': 400,\n                'bbox': [10, 10, 10, 40],\n                'iscrowd': 0\n            }, {\n                'id': 2,\n                'category_id': 0,\n                'area': 400,\n                'bbox': [30, 10, 10, 40],\n                'iscrowd': 0\n            }, {\n                'id': 3,\n                'category_id': 2,\n                'iscrowd': 0,\n                'bbox': [50, 10, 10, 5],\n                'area': 50\n            }, {\n                'id': 4,\n                'category_id': 3,\n                'iscrowd': 0,\n                'bbox': [0, 0, 80, 60],\n                'area': 3950\n            }],\n            'file_name':\n            'fake_name1.png',\n            'image_id':\n            0\n        }]\n\n        gt_json = {\n            'images': images,\n            'annotations': annotations,\n            'categories': categories\n        }\n\n        # 4 is the id of the background class annotation.\n        gt = np.zeros((60, 80), dtype=np.int64) + 4\n        gt_bboxes = np.array(\n            [[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],\n            dtype=np.int64)\n        for i in range(3):\n            x, y, w, h = gt_bboxes[i]\n            gt[y:y + h, x:x + w] = i + 1  # id starts from 1\n\n        rgb_gt_seg_map = np.zeros(gt.shape + (3, ), dtype=np.uint8)\n        rgb_gt_seg_map[:, :, 2] = gt // (256 * 256)\n        rgb_gt_seg_map[:, :, 1] = gt % (256 * 256) // 256\n        rgb_gt_seg_map[:, :, 0] = gt % 256\n\n        img_path = osp.join(seg_map_dir, 'fake_name1.png')\n        mmcv.imwrite(rgb_gt_seg_map[:, :, ::-1], img_path)\n        dump(gt_json, ann_file)\n\n        return gt_json\n\n    def _create_panoptic_data_samples(self):\n        # predictions\n        # TP for background class, IoU=3576/4324=0.827\n        # 2 the category id of the background class\n        pred = np.zeros((60, 80), dtype=np.int64) + 2\n        pred_bboxes = np.array(\n            [\n                [11, 11, 10, 40],  # TP IoU=351/449=0.78\n                [38, 10, 10, 40],  # FP\n                [51, 10, 10, 5]  # TP IoU=45/55=0.818\n            ],\n            dtype=np.int64)\n        pred_labels = np.array([0, 0, 1], dtype=np.int64)\n        for i in range(3):\n          
  x, y, w, h = pred_bboxes[i]\n            pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]\n\n        data_samples = [{\n            'img_id':\n            0,\n            'ori_shape': (60, 80),\n            'img_path':\n            'xxx/fake_name1.jpg',\n            'segments_info': [{\n                'id': 1,\n                'category': 0,\n                'is_thing': 1\n            }, {\n                'id': 2,\n                'category': 0,\n                'is_thing': 1\n            }, {\n                'id': 3,\n                'category': 1,\n                'is_thing': 1\n            }, {\n                'id': 4,\n                'category': 2,\n                'is_thing': 0\n            }],\n            'seg_map_path':\n            osp.join(self.gt_seg_dir, 'fake_name1.png'),\n            'pred_panoptic_seg': {\n                'sem_seg': torch.from_numpy(pred).unsqueeze(0)\n            },\n        }]\n\n        return data_samples\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n        self.gt_json_path = osp.join(self.tmp_dir.name, 'gt.json')\n        self.gt_seg_dir = osp.join(self.tmp_dir.name, 'gt_seg')\n        os.mkdir(self.gt_seg_dir)\n        self._create_panoptic_gt_annotations(self.gt_json_path,\n                                             self.gt_seg_dir)\n        self.dataset_meta = {\n            'classes': ('person', 'dog', 'wall'),\n            'thing_classes': ('person', 'dog'),\n            'stuff_classes': ('wall', )\n        }\n        self.target = {\n            'coco_panoptic/PQ': 67.86874803219071,\n            'coco_panoptic/SQ': 80.89770126158936,\n            'coco_panoptic/RQ': 83.33333333333334,\n            'coco_panoptic/PQ_th': 60.45252075318891,\n            'coco_panoptic/SQ_th': 79.9959505972869,\n            'coco_panoptic/RQ_th': 75.0,\n            'coco_panoptic/PQ_st': 82.70120259019427,\n            'coco_panoptic/SQ_st': 82.70120259019427,\n            'coco_panoptic/RQ_st': 100.0\n        }\n        self.data_samples = self._create_panoptic_data_samples()\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    @unittest.skipIf(panopticapi is not None, 'panopticapi is installed')\n    def test_init(self):\n        with self.assertRaises(RuntimeError):\n            CocoPanopticMetric()\n\n    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')\n    def test_evaluate_without_json(self):\n        # with tmpfile, without json\n        metric = CocoPanopticMetric(\n            ann_file=None,\n            seg_prefix=self.gt_seg_dir,\n            classwise=False,\n            nproc=1,\n            outfile_prefix=None)\n\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, self.target)\n\n        # without tmpfile and json\n        outfile_prefix = f'{self.tmp_dir.name}/test'\n        metric = CocoPanopticMetric(\n            ann_file=None,\n            seg_prefix=self.gt_seg_dir,\n            classwise=False,\n            nproc=1,\n            outfile_prefix=outfile_prefix)\n\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, self.target)\n\n    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')\n    def test_evaluate_with_json(self):\n        # with tmpfile 
and json\n        metric = CocoPanopticMetric(\n            ann_file=self.gt_json_path,\n            seg_prefix=self.gt_seg_dir,\n            classwise=False,\n            nproc=1,\n            outfile_prefix=None)\n\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, self.target)\n\n        # classwise\n        metric = CocoPanopticMetric(\n            ann_file=self.gt_json_path,\n            seg_prefix=self.gt_seg_dir,\n            classwise=True,\n            nproc=1,\n            outfile_prefix=None)\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, self.target)\n\n        # without tmpfile, with json\n        outfile_prefix = f'{self.tmp_dir.name}/test1'\n        metric = CocoPanopticMetric(\n            ann_file=self.gt_json_path,\n            seg_prefix=self.gt_seg_dir,\n            classwise=False,\n            nproc=1,\n            outfile_prefix=outfile_prefix)\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, self.target)\n\n    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')\n    def test_format_only(self):\n        with self.assertRaises(AssertionError):\n            metric = CocoPanopticMetric(\n                ann_file=self.gt_json_path,\n                seg_prefix=self.gt_seg_dir,\n                classwise=False,\n                nproc=1,\n                format_only=True,\n                outfile_prefix=None)\n\n        outfile_prefix = f'{self.tmp_dir.name}/test'\n        metric = CocoPanopticMetric(\n            ann_file=self.gt_json_path,\n            seg_prefix=self.gt_seg_dir,\n            classwise=False,\n            nproc=1,\n            format_only=True,\n            outfile_prefix=outfile_prefix)\n        metric.dataset_meta = self.dataset_meta\n        metric.process({}, deepcopy(self.data_samples))\n        eval_results = metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, dict())\n        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic'))\n        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic.json'))\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_crowdhuman_metric.py",
    "content": "import os.path as osp\nimport tempfile\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\n\nfrom mmdet.evaluation import CrowdHumanMetric\n\n\nclass TestCrowdHumanMetric(TestCase):\n\n    def _create_dummy_results(self):\n        bboxes = np.array([[1330, 317, 418, 1338], [792, 24, 723, 2017],\n                           [693, 291, 307, 894], [522, 290, 285, 826],\n                           [728, 336, 175, 602], [92, 337, 267, 681]])\n        bboxes[:, 2:4] += bboxes[:, 0:2]\n        scores = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n        return dict(\n            bboxes=torch.from_numpy(bboxes), scores=torch.from_numpy(scores))\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n        self.ann_file_path = \\\n            'tests/data/crowdhuman_dataset/test_annotation_train.odgt',\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    def test_init(self):\n        with self.assertRaisesRegex(KeyError, 'metric should be one of'):\n            CrowdHumanMetric(ann_file=self.ann_file_path[0], metric='unknown')\n\n    def test_evaluate(self):\n        # create dummy data\n        dummy_pred = self._create_dummy_results()\n\n        crowdhuman_metric = CrowdHumanMetric(\n            ann_file=self.ann_file_path[0],\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        crowdhuman_metric.process({}, [\n            dict(\n                pred_instances=dummy_pred,\n                img_id='283554,35288000868e92d4',\n                ori_shape=(1640, 1640))\n        ])\n        eval_results = crowdhuman_metric.evaluate(size=1)\n        target = {\n            'crowd_human/mAP': 0.8333,\n            'crowd_human/mMR': 0.0,\n            'crowd_human/JI': 1.0\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(osp.isfile(osp.join(self.tmp_dir.name, 'test.json')))\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_dump_det_results.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport tempfile\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.fileio import load\nfrom torch import Tensor\n\nfrom mmdet.evaluation import DumpDetResults\nfrom mmdet.structures.mask import encode_mask_results\n\n\nclass TestDumpResults(TestCase):\n\n    def test_init(self):\n        with self.assertRaisesRegex(ValueError,\n                                    'The output file must be a pkl file.'):\n            DumpDetResults(out_file_path='./results.json')\n\n    def test_process(self):\n        metric = DumpDetResults(out_file_path='./results.pkl')\n        data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]\n        metric.process(None, data_samples)\n        self.assertEqual(len(metric.results), 1)\n        self.assertEqual(metric.results[0]['data'][0].device,\n                         torch.device('cpu'))\n\n        metric = DumpDetResults(out_file_path='./results.pkl')\n        masks = torch.zeros(10, 10, 4)\n        data_samples = [\n            dict(pred_instances=dict(masks=masks), gt_instances=[])\n        ]\n        metric.process(None, data_samples)\n        self.assertEqual(len(metric.results), 1)\n        self.assertEqual(metric.results[0]['pred_instances']['masks'],\n                         encode_mask_results(masks.numpy()))\n        self.assertNotIn('gt_instances', metric.results[0])\n\n    def test_compute_metrics(self):\n        temp_dir = tempfile.TemporaryDirectory()\n        path = osp.join(temp_dir.name, 'results.pkl')\n        metric = DumpDetResults(out_file_path=path)\n        data_samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]\n        metric.process(None, data_samples)\n        metric.compute_metrics(metric.results)\n        self.assertTrue(osp.isfile(path))\n\n        results = load(path)\n        self.assertEqual(len(results), 1)\n        self.assertEqual(results[0]['data'][0].device, torch.device('cpu'))\n\n        temp_dir.cleanup()\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_lvis_metric.py",
    "content": "import os.path as osp\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport pycocotools.mask as mask_util\nimport torch\n\nfrom mmdet.evaluation.metrics import LVISMetric\n\ntry:\n    import lvis\nexcept ImportError:\n    lvis = None\n\nfrom mmengine.fileio import dump\n\n\nclass TestLVISMetric(unittest.TestCase):\n\n    def _create_dummy_lvis_json(self, json_name):\n        dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)\n        dummy_mask[:5, :5] = 1\n        rle_mask = mask_util.encode(dummy_mask)\n        rle_mask['counts'] = rle_mask['counts'].decode('utf-8')\n        image = {\n            'id': 0,\n            'width': 640,\n            'height': 640,\n            'neg_category_ids': [],\n            'not_exhaustive_category_ids': [],\n            'coco_url': 'http://images.cocodataset.org/val2017/0.jpg',\n        }\n\n        annotation_1 = {\n            'id': 1,\n            'image_id': 0,\n            'category_id': 1,\n            'area': 400,\n            'bbox': [50, 60, 20, 20],\n            'segmentation': rle_mask,\n        }\n\n        annotation_2 = {\n            'id': 2,\n            'image_id': 0,\n            'category_id': 1,\n            'area': 900,\n            'bbox': [100, 120, 30, 30],\n            'segmentation': rle_mask,\n        }\n\n        annotation_3 = {\n            'id': 3,\n            'image_id': 0,\n            'category_id': 2,\n            'area': 1600,\n            'bbox': [150, 160, 40, 40],\n            'segmentation': rle_mask,\n        }\n\n        annotation_4 = {\n            'id': 4,\n            'image_id': 0,\n            'category_id': 1,\n            'area': 10000,\n            'bbox': [250, 260, 100, 100],\n            'segmentation': rle_mask,\n        }\n\n        categories = [\n            {\n                'id': 1,\n                'name': 'aerosol_can',\n                'frequency': 'c',\n                'image_count': 64\n            },\n            {\n                'id': 2,\n                'name': 'air_conditioner',\n                'frequency': 'f',\n                'image_count': 364\n            },\n        ]\n\n        fake_json = {\n            'images': [image],\n            'annotations':\n            [annotation_1, annotation_2, annotation_3, annotation_4],\n            'categories': categories\n        }\n\n        dump(fake_json, json_name)\n\n    def _create_dummy_results(self):\n        bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],\n                           [150, 160, 190, 200], [250, 260, 350, 360]])\n        scores = np.array([1.0, 0.98, 0.96, 0.95])\n        labels = np.array([0, 0, 1, 0])\n        dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)\n        dummy_mask[:, :5, :5] = 1\n        return dict(\n            bboxes=torch.from_numpy(bboxes),\n            scores=torch.from_numpy(scores),\n            labels=torch.from_numpy(labels),\n            masks=torch.from_numpy(dummy_mask))\n\n    def setUp(self):\n        self.tmp_dir = tempfile.TemporaryDirectory()\n\n    def tearDown(self):\n        self.tmp_dir.cleanup()\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_init(self):\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        with self.assertRaisesRegex(KeyError, 'metric should be one of'):\n            LVISMetric(ann_file=fake_json_file, metric='unknown')\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def 
test_evaluate(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test single lvis dataset evaluation\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file,\n            classwise=False,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {\n            'lvis/bbox_AP': 1.0,\n            'lvis/bbox_AP50': 1.0,\n            'lvis/bbox_AP75': 1.0,\n            'lvis/bbox_APs': 1.0,\n            'lvis/bbox_APm': 1.0,\n            'lvis/bbox_APl': 1.0,\n            'lvis/bbox_APr': -1.0,\n            'lvis/bbox_APc': 1.0,\n            'lvis/bbox_APf': 1.0\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))\n\n        # test box and segm lvis dataset evaluation\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file,\n            metric=['bbox', 'segm'],\n            classwise=False,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {\n            'lvis/bbox_AP': 1.0,\n            'lvis/bbox_AP50': 1.0,\n            'lvis/bbox_AP75': 1.0,\n            'lvis/bbox_APs': 1.0,\n            'lvis/bbox_APm': 1.0,\n            'lvis/bbox_APl': 1.0,\n            'lvis/bbox_APr': -1.0,\n            'lvis/bbox_APc': 1.0,\n            'lvis/bbox_APf': 1.0,\n            'lvis/segm_AP': 1.0,\n            'lvis/segm_AP50': 1.0,\n            'lvis/segm_AP75': 1.0,\n            'lvis/segm_APs': 1.0,\n            'lvis/segm_APm': 1.0,\n            'lvis/segm_APl': 1.0,\n            'lvis/segm_APr': -1.0,\n            'lvis/segm_APc': 1.0,\n            'lvis/segm_APf': 1.0\n        }\n        self.assertDictEqual(eval_results, target)\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))\n        self.assertTrue(\n            osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))\n\n        # test invalid custom metric_items\n        with self.assertRaisesRegex(\n                KeyError,\n                \"metric should be one of 'bbox', 'segm', 'proposal', \"\n                \"'proposal_fast', but got invalid.\"):\n            lvis_metric = LVISMetric(\n                ann_file=fake_json_file, metric=['invalid'])\n            lvis_metric.evaluate(size=1)\n\n        # test custom metric_items\n        lvis_metric = LVISMetric(ann_file=fake_json_file, metric_items=['APm'])\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {\n            'lvis/bbox_APm': 1.0,\n        }\n        self.assertDictEqual(eval_results, 
target)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_classwise_evaluate(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test single lvis dataset evaluation\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file, metric='bbox', classwise=True)\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {\n            'lvis/bbox_AP': 1.0,\n            'lvis/bbox_AP50': 1.0,\n            'lvis/bbox_AP75': 1.0,\n            'lvis/bbox_APs': 1.0,\n            'lvis/bbox_APm': 1.0,\n            'lvis/bbox_APl': 1.0,\n            'lvis/bbox_APr': -1.0,\n            'lvis/bbox_APc': 1.0,\n            'lvis/bbox_APf': 1.0,\n            'lvis/aerosol_can_precision': 1.0,\n            'lvis/air_conditioner_precision': 1.0,\n        }\n        self.assertDictEqual(eval_results, target)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_manually_set_iou_thrs(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n\n        # test single lvis dataset evaluation\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        self.assertEqual(lvis_metric.iou_thrs, [0.3, 0.6])\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_fast_eval_recall(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        # test default proposal nums\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file, metric='proposal_fast')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {'lvis/AR@100': 1.0, 'lvis/AR@300': 1.0, 'lvis/AR@1000': 1.0}\n        self.assertDictEqual(eval_results, target)\n\n        # test manually set proposal nums\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file,\n            metric='proposal_fast',\n            proposal_nums=(2, 4))\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {'lvis/AR@2': 0.5, 'lvis/AR@4': 1.0}\n        self.assertDictEqual(eval_results, target)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_evaluate_proposal(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        dummy_pred = 
self._create_dummy_results()\n\n        lvis_metric = LVISMetric(ann_file=fake_json_file, metric='proposal')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        target = {\n            'lvis/AR@300': 1.0,\n            'lvis/ARs@300': 1.0,\n            'lvis/ARm@300': 1.0,\n            'lvis/ARl@300': 1.0\n        }\n        self.assertDictEqual(eval_results, target)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_empty_results(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        lvis_metric = LVISMetric(ann_file=fake_json_file, metric='bbox')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        bboxes = np.zeros((0, 4))\n        labels = np.array([])\n        scores = np.array([])\n        dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)\n        empty_pred = dict(\n            bboxes=torch.from_numpy(bboxes),\n            scores=torch.from_numpy(scores),\n            labels=torch.from_numpy(labels),\n            masks=torch.from_numpy(dummy_mask))\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])\n        # lvis api Index error will be caught\n        lvis_metric.evaluate(size=1)\n\n    @unittest.skipIf(lvis is None, 'lvis is not installed.')\n    def test_format_only(self):\n        # create dummy data\n        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n        self._create_dummy_lvis_json(fake_json_file)\n        dummy_pred = self._create_dummy_results()\n\n        with self.assertRaises(AssertionError):\n            LVISMetric(\n                ann_file=fake_json_file,\n                classwise=False,\n                format_only=True,\n                outfile_prefix=None)\n\n        lvis_metric = LVISMetric(\n            ann_file=fake_json_file,\n            metric='bbox',\n            classwise=False,\n            format_only=True,\n            outfile_prefix=f'{self.tmp_dir.name}/test')\n        lvis_metric.dataset_meta = dict(\n            classes=['aerosol_can', 'air_conditioner'])\n        lvis_metric.process(\n            {},\n            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n        eval_results = lvis_metric.evaluate(size=1)\n        self.assertDictEqual(eval_results, dict())\n        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))\n"
  },
  {
    "path": "tests/test_evaluation/test_metrics/test_openimages_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport numpy as np\nimport torch\n\nfrom mmdet.datasets import OpenImagesDataset\nfrom mmdet.evaluation import OpenImagesMetric\nfrom mmdet.utils import register_all_modules\n\n\nclass TestOpenImagesMetric(unittest.TestCase):\n\n    def _create_dummy_results(self):\n        bboxes = np.array([[23.2172, 31.7541, 987.3413, 357.8443],\n                           [100, 120, 130, 150], [150, 160, 190, 200],\n                           [250, 260, 350, 360]])\n        scores = np.array([1.0, 0.98, 0.96, 0.95])\n        labels = np.array([0, 0, 0, 0])\n        return dict(\n            bboxes=torch.from_numpy(bboxes),\n            scores=torch.from_numpy(scores),\n            labels=torch.from_numpy(labels))\n\n    def test_init(self):\n        # test invalid iou_thrs\n        with self.assertRaises(AssertionError):\n            OpenImagesMetric(iou_thrs={'a', 0.5}, ioa_thrs={'b', 0.5})\n        # test ioa and iou_thrs length not equal\n        with self.assertRaises(AssertionError):\n            OpenImagesMetric(iou_thrs=[0.5, 0.75], ioa_thrs=[0.5])\n\n        metric = OpenImagesMetric(iou_thrs=0.6)\n        self.assertEqual(metric.iou_thrs, [0.6])\n\n    def test_eval(self):\n        register_all_modules()\n        dataset = OpenImagesDataset(\n            data_root='tests/data/OpenImages/',\n            ann_file='annotations/oidv6-train-annotations-bbox.csv',\n            data_prefix=dict(img='OpenImages/train/'),\n            label_file='annotations/class-descriptions-boxable.csv',\n            hierarchy_file='annotations/bbox_labels_600_hierarchy.json',\n            meta_file='annotations/image-metas.pkl',\n            pipeline=[\n                dict(type='LoadAnnotations', with_bbox=True),\n                dict(\n                    type='PackDetInputs',\n                    meta_keys=('img_id', 'img_path', 'instances'))\n            ])\n        dataset.full_init()\n        data_sample = dataset[0]['data_samples'].to_dict()\n        data_sample['pred_instances'] = self._create_dummy_results()\n\n        metric = OpenImagesMetric()\n        metric.dataset_meta = dataset.metainfo\n        metric.process({}, [data_sample])\n        results = metric.evaluate(size=len(dataset))\n        targets = {'openimages/AP50': 1.0, 'openimages/mAP': 1.0}\n        self.assertDictEqual(results, targets)\n\n        # test multi-threshold\n        metric = OpenImagesMetric(iou_thrs=[0.1, 0.5], ioa_thrs=[0.1, 0.5])\n        metric.dataset_meta = dataset.metainfo\n        metric.process({}, [data_sample])\n        results = metric.evaluate(size=len(dataset))\n        targets = {\n            'openimages/AP10': 1.0,\n            'openimages/AP50': 1.0,\n            'openimages/mAP': 1.0\n        }\n        self.assertDictEqual(results, targets)\n"
  },
  {
    "path": "tests/test_models/test_backbones/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .utils import check_norm_state, is_block, is_norm\n\n__all__ = ['is_block', 'is_norm', 'check_norm_state']\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_csp_darknet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.backbones.csp_darknet import CSPDarknet\nfrom .utils import check_norm_state, is_norm\n\n\ndef test_csp_darknet_backbone():\n    with pytest.raises(ValueError):\n        # frozen_stages must in range(-1, len(arch_setting) + 1)\n        CSPDarknet(frozen_stages=6)\n\n    with pytest.raises(AssertionError):\n        # out_indices in range(len(arch_setting) + 1)\n        CSPDarknet(out_indices=[6])\n\n    # Test CSPDarknet with first stage frozen\n    frozen_stages = 1\n    model = CSPDarknet(frozen_stages=frozen_stages)\n    model.train()\n\n    for mod in model.stem.modules():\n        for param in mod.parameters():\n            assert param.requires_grad is False\n    for i in range(1, frozen_stages + 1):\n        layer = getattr(model, f'stage{i}')\n        for mod in layer.modules():\n            if isinstance(mod, _BatchNorm):\n                assert mod.training is False\n        for param in layer.parameters():\n            assert param.requires_grad is False\n\n    # Test CSPDarknet with norm_eval=True\n    model = CSPDarknet(norm_eval=True)\n    model.train()\n\n    assert check_norm_state(model.modules(), False)\n\n    # Test CSPDarknet-P5 forward with widen_factor=0.5\n    model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))\n    model.train()\n\n    imgs = torch.randn(1, 3, 64, 64)\n    feat = model(imgs)\n    assert len(feat) == 5\n    assert feat[0].shape == torch.Size((1, 16, 32, 32))\n    assert feat[1].shape == torch.Size((1, 32, 16, 16))\n    assert feat[2].shape == torch.Size((1, 64, 8, 8))\n    assert feat[3].shape == torch.Size((1, 128, 4, 4))\n    assert feat[4].shape == torch.Size((1, 256, 2, 2))\n\n    # Test CSPDarknet-P6 forward with widen_factor=0.5\n    model = CSPDarknet(\n        arch='P6',\n        widen_factor=0.25,\n        out_indices=range(0, 6),\n        spp_kernal_sizes=(3, 5, 7))\n    model.train()\n\n    imgs = torch.randn(1, 3, 128, 128)\n    feat = model(imgs)\n    assert feat[0].shape == torch.Size((1, 16, 64, 64))\n    assert feat[1].shape == torch.Size((1, 32, 32, 32))\n    assert feat[2].shape == torch.Size((1, 64, 16, 16))\n    assert feat[3].shape == torch.Size((1, 128, 8, 8))\n    assert feat[4].shape == torch.Size((1, 192, 4, 4))\n    assert feat[5].shape == torch.Size((1, 256, 2, 2))\n\n    # Test CSPDarknet forward with dict(type='ReLU')\n    model = CSPDarknet(\n        widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))\n    model.train()\n\n    imgs = torch.randn(1, 3, 64, 64)\n    feat = model(imgs)\n    assert len(feat) == 5\n    assert feat[0].shape == torch.Size((1, 8, 32, 32))\n    assert feat[1].shape == torch.Size((1, 16, 16, 16))\n    assert feat[2].shape == torch.Size((1, 32, 8, 8))\n    assert feat[3].shape == torch.Size((1, 64, 4, 4))\n    assert feat[4].shape == torch.Size((1, 128, 2, 2))\n\n    # Test CSPDarknet with BatchNorm forward\n    model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))\n    for m in model.modules():\n        if is_norm(m):\n            assert isinstance(m, _BatchNorm)\n    model.train()\n\n    imgs = torch.randn(1, 3, 64, 64)\n    feat = model(imgs)\n    assert len(feat) == 5\n    assert feat[0].shape == torch.Size((1, 8, 32, 32))\n    assert feat[1].shape == torch.Size((1, 16, 16, 16))\n    assert feat[2].shape == torch.Size((1, 32, 8, 8))\n    assert feat[3].shape == torch.Size((1, 64, 4, 
4))\n    assert feat[4].shape == torch.Size((1, 128, 2, 2))\n\n    # Test CSPDarknet with custom arch forward\n    arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],\n                     [224, 512, 1, True, False]]\n    model = CSPDarknet(\n        arch_ovewrite=arch_ovewrite,\n        widen_factor=0.25,\n        out_indices=(0, 1, 2, 3))\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size((1, 8, 16, 16))\n    assert feat[1].shape == torch.Size((1, 14, 8, 8))\n    assert feat[2].shape == torch.Size((1, 56, 4, 4))\n    assert feat[3].shape == torch.Size((1, 128, 2, 2))\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_detectors_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\n\nfrom mmdet.models.backbones import DetectoRS_ResNet\n\n\ndef test_detectorrs_resnet_backbone():\n    detectorrs_cfg = dict(\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        conv_cfg=dict(type='ConvAWS'),\n        sac=dict(type='SAC', use_deform=True),\n        stage_with_sac=(False, True, True, True),\n        output_img=True)\n    \"\"\"Test init_weights config\"\"\"\n    with pytest.raises(AssertionError):\n        # pretrained and init_cfg cannot be specified at the same time\n        DetectoRS_ResNet(\n            **detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')\n\n    with pytest.raises(AssertionError):\n        # init_cfg must be a dict\n        DetectoRS_ResNet(\n            **detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])\n\n    with pytest.raises(KeyError):\n        # init_cfg must contain the key `type`\n        DetectoRS_ResNet(\n            **detectorrs_cfg,\n            pretrained=None,\n            init_cfg=dict(checkpoint='Pretrained'))\n\n    with pytest.raises(AssertionError):\n        # init_cfg only support initialize pretrained model way\n        DetectoRS_ResNet(\n            **detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))\n\n    with pytest.raises(TypeError):\n        # pretrained mast be a str or None\n        model = DetectoRS_ResNet(\n            **detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)\n        model.init_weights()\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_efficientnet.py",
    "content": "import pytest\nimport torch\n\nfrom mmdet.models.backbones import EfficientNet\n\n\ndef test_efficientnet_backbone():\n    \"\"\"Test EfficientNet backbone.\"\"\"\n    with pytest.raises(AssertionError):\n        # EfficientNet arch should be a key in EfficientNet.arch_settings\n        EfficientNet(arch='c3')\n\n    model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))\n    model.train()\n\n    imgs = torch.randn(2, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size([2, 32, 16, 16])\n    assert feat[1].shape == torch.Size([2, 16, 16, 16])\n    assert feat[2].shape == torch.Size([2, 24, 8, 8])\n    assert feat[3].shape == torch.Size([2, 40, 4, 4])\n    assert feat[4].shape == torch.Size([2, 112, 2, 2])\n    assert feat[5].shape == torch.Size([2, 320, 1, 1])\n    assert feat[6].shape == torch.Size([2, 1280, 1, 1])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_hourglass.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones.hourglass import HourglassNet\n\n\ndef test_hourglass_backbone():\n    with pytest.raises(AssertionError):\n        # HourglassNet's num_stacks should larger than 0\n        HourglassNet(num_stacks=0)\n\n    with pytest.raises(AssertionError):\n        # len(stage_channels) should equal len(stage_blocks)\n        HourglassNet(\n            stage_channels=[256, 256, 384, 384, 384],\n            stage_blocks=[2, 2, 2, 2, 2, 4])\n\n    with pytest.raises(AssertionError):\n        # len(stage_channels) should lagrer than downsample_times\n        HourglassNet(\n            downsample_times=5,\n            stage_channels=[256, 256, 384, 384, 384],\n            stage_blocks=[2, 2, 2, 2, 2])\n\n    # Test HourglassNet-52\n    model = HourglassNet(\n        num_stacks=1,\n        stage_channels=(64, 64, 96, 96, 96, 128),\n        feat_channel=64)\n    model.train()\n\n    imgs = torch.randn(1, 3, 256, 256)\n    feat = model(imgs)\n    assert len(feat) == 1\n    assert feat[0].shape == torch.Size([1, 64, 64, 64])\n\n    # Test HourglassNet-104\n    model = HourglassNet(\n        num_stacks=2,\n        stage_channels=(64, 64, 96, 96, 96, 128),\n        feat_channel=64)\n    model.train()\n\n    imgs = torch.randn(1, 3, 256, 256)\n    feat = model(imgs)\n    assert len(feat) == 2\n    assert feat[0].shape == torch.Size([1, 64, 64, 64])\n    assert feat[1].shape == torch.Size([1, 64, 64, 64])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_hrnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones.hrnet import HRModule, HRNet\nfrom mmdet.models.backbones.resnet import BasicBlock, Bottleneck\n\n\n@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])\ndef test_hrmodule(block):\n    # Test multiscale forward\n    num_channles = (32, 64)\n    in_channels = [c * block.expansion for c in num_channles]\n    hrmodule = HRModule(\n        num_branches=2,\n        blocks=block,\n        in_channels=in_channels,\n        num_blocks=(4, 4),\n        num_channels=num_channles,\n    )\n\n    feats = [\n        torch.randn(1, in_channels[0], 64, 64),\n        torch.randn(1, in_channels[1], 32, 32)\n    ]\n    feats = hrmodule(feats)\n\n    assert len(feats) == 2\n    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])\n    assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])\n\n    # Test single scale forward\n    num_channles = (32, 64)\n    in_channels = [c * block.expansion for c in num_channles]\n    hrmodule = HRModule(\n        num_branches=2,\n        blocks=block,\n        in_channels=in_channels,\n        num_blocks=(4, 4),\n        num_channels=num_channles,\n        multiscale_output=False,\n    )\n\n    feats = [\n        torch.randn(1, in_channels[0], 64, 64),\n        torch.randn(1, in_channels[1], 32, 32)\n    ]\n    feats = hrmodule(feats)\n\n    assert len(feats) == 1\n    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])\n\n\ndef test_hrnet_backbone():\n    # only have 3 stages\n    extra = dict(\n        stage1=dict(\n            num_modules=1,\n            num_branches=1,\n            block='BOTTLENECK',\n            num_blocks=(4, ),\n            num_channels=(64, )),\n        stage2=dict(\n            num_modules=1,\n            num_branches=2,\n            block='BASIC',\n            num_blocks=(4, 4),\n            num_channels=(32, 64)),\n        stage3=dict(\n            num_modules=4,\n            num_branches=3,\n            block='BASIC',\n            num_blocks=(4, 4, 4),\n            num_channels=(32, 64, 128)))\n\n    with pytest.raises(AssertionError):\n        # HRNet now only support 4 stages\n        HRNet(extra=extra)\n    extra['stage4'] = dict(\n        num_modules=3,\n        num_branches=3,  # should be 4\n        block='BASIC',\n        num_blocks=(4, 4, 4, 4),\n        num_channels=(32, 64, 128, 256))\n\n    with pytest.raises(AssertionError):\n        # len(num_blocks) should equal num_branches\n        HRNet(extra=extra)\n\n    extra['stage4']['num_branches'] = 4\n\n    # Test hrnetv2p_w32\n    model = HRNet(extra=extra)\n    model.init_weights()\n    model.train()\n\n    imgs = torch.randn(1, 3, 256, 256)\n    feats = model(imgs)\n    assert len(feats) == 4\n    assert feats[0].shape == torch.Size([1, 32, 64, 64])\n    assert feats[3].shape == torch.Size([1, 256, 8, 8])\n\n    # Test single scale output\n    model = HRNet(extra=extra, multiscale_output=False)\n    model.init_weights()\n    model.train()\n\n    imgs = torch.randn(1, 3, 256, 256)\n    feats = model(imgs)\n    assert len(feats) == 1\n    assert feats[0].shape == torch.Size([1, 32, 64, 64])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_mobilenet_v2.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom torch.nn.modules import GroupNorm\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.backbones.mobilenet_v2 import MobileNetV2\nfrom .utils import check_norm_state, is_block, is_norm\n\n\ndef test_mobilenetv2_backbone():\n    with pytest.raises(ValueError):\n        # frozen_stages must in range(-1, 8)\n        MobileNetV2(frozen_stages=8)\n\n    with pytest.raises(ValueError):\n        # out_indices in range(-1, 8)\n        MobileNetV2(out_indices=[8])\n\n    # Test MobileNetV2 with first stage frozen\n    frozen_stages = 1\n    model = MobileNetV2(frozen_stages=frozen_stages)\n    model.train()\n\n    for mod in model.conv1.modules():\n        for param in mod.parameters():\n            assert param.requires_grad is False\n    for i in range(1, frozen_stages + 1):\n        layer = getattr(model, f'layer{i}')\n        for mod in layer.modules():\n            if isinstance(mod, _BatchNorm):\n                assert mod.training is False\n        for param in layer.parameters():\n            assert param.requires_grad is False\n\n    # Test MobileNetV2 with norm_eval=True\n    model = MobileNetV2(norm_eval=True)\n    model.train()\n\n    assert check_norm_state(model.modules(), False)\n\n    # Test MobileNetV2 forward with widen_factor=1.0\n    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))\n    model.train()\n\n    assert check_norm_state(model.modules(), True)\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 8\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 24, 56, 56))\n    assert feat[2].shape == torch.Size((1, 32, 28, 28))\n    assert feat[3].shape == torch.Size((1, 64, 14, 14))\n    assert feat[4].shape == torch.Size((1, 96, 14, 14))\n    assert feat[5].shape == torch.Size((1, 160, 7, 7))\n    assert feat[6].shape == torch.Size((1, 320, 7, 7))\n    assert feat[7].shape == torch.Size((1, 1280, 7, 7))\n\n    # Test MobileNetV2 forward with widen_factor=0.5\n    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size((1, 8, 112, 112))\n    assert feat[1].shape == torch.Size((1, 16, 56, 56))\n    assert feat[2].shape == torch.Size((1, 16, 28, 28))\n    assert feat[3].shape == torch.Size((1, 32, 14, 14))\n    assert feat[4].shape == torch.Size((1, 48, 14, 14))\n    assert feat[5].shape == torch.Size((1, 80, 7, 7))\n    assert feat[6].shape == torch.Size((1, 160, 7, 7))\n\n    # Test MobileNetV2 forward with widen_factor=2.0\n    model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert feat[0].shape == torch.Size((1, 32, 112, 112))\n    assert feat[1].shape == torch.Size((1, 48, 56, 56))\n    assert feat[2].shape == torch.Size((1, 64, 28, 28))\n    assert feat[3].shape == torch.Size((1, 128, 14, 14))\n    assert feat[4].shape == torch.Size((1, 192, 14, 14))\n    assert feat[5].shape == torch.Size((1, 320, 7, 7))\n    assert feat[6].shape == torch.Size((1, 640, 7, 7))\n    assert feat[7].shape == torch.Size((1, 2560, 7, 7))\n\n    # Test MobileNetV2 forward with dict(type='ReLU')\n    model = MobileNetV2(\n        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))\n    model.train()\n\n   
 imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 24, 56, 56))\n    assert feat[2].shape == torch.Size((1, 32, 28, 28))\n    assert feat[3].shape == torch.Size((1, 64, 14, 14))\n    assert feat[4].shape == torch.Size((1, 96, 14, 14))\n    assert feat[5].shape == torch.Size((1, 160, 7, 7))\n    assert feat[6].shape == torch.Size((1, 320, 7, 7))\n\n    # Test MobileNetV2 with BatchNorm forward\n    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))\n    for m in model.modules():\n        if is_norm(m):\n            assert isinstance(m, _BatchNorm)\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 24, 56, 56))\n    assert feat[2].shape == torch.Size((1, 32, 28, 28))\n    assert feat[3].shape == torch.Size((1, 64, 14, 14))\n    assert feat[4].shape == torch.Size((1, 96, 14, 14))\n    assert feat[5].shape == torch.Size((1, 160, 7, 7))\n    assert feat[6].shape == torch.Size((1, 320, 7, 7))\n\n    # Test MobileNetV2 with GroupNorm forward\n    model = MobileNetV2(\n        widen_factor=1.0,\n        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),\n        out_indices=range(0, 7))\n    for m in model.modules():\n        if is_norm(m):\n            assert isinstance(m, GroupNorm)\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 24, 56, 56))\n    assert feat[2].shape == torch.Size((1, 32, 28, 28))\n    assert feat[3].shape == torch.Size((1, 64, 14, 14))\n    assert feat[4].shape == torch.Size((1, 96, 14, 14))\n    assert feat[5].shape == torch.Size((1, 160, 7, 7))\n    assert feat[6].shape == torch.Size((1, 320, 7, 7))\n\n    # Test MobileNetV2 with layers 1, 3, 5 out forward\n    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 3\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 32, 28, 28))\n    assert feat[2].shape == torch.Size((1, 96, 14, 14))\n\n    # Test MobileNetV2 with checkpoint forward\n    model = MobileNetV2(\n        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))\n    for m in model.modules():\n        if is_block(m):\n            assert m.with_cp\n    model.train()\n\n    imgs = torch.randn(1, 3, 224, 224)\n    feat = model(imgs)\n    assert len(feat) == 7\n    assert feat[0].shape == torch.Size((1, 16, 112, 112))\n    assert feat[1].shape == torch.Size((1, 24, 56, 56))\n    assert feat[2].shape == torch.Size((1, 32, 28, 28))\n    assert feat[3].shape == torch.Size((1, 64, 14, 14))\n    assert feat[4].shape == torch.Size((1, 96, 14, 14))\n    assert feat[5].shape == torch.Size((1, 160, 7, 7))\n    assert feat[6].shape == torch.Size((1, 320, 7, 7))\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_pvt.py",
    "content": "import pytest\nimport torch\n\nfrom mmdet.models.backbones.pvt import (PVTEncoderLayer,\n                                        PyramidVisionTransformer,\n                                        PyramidVisionTransformerV2)\n\n\ndef test_pvt_block():\n    # test PVT structure and forward\n    block = PVTEncoderLayer(\n        embed_dims=64, num_heads=4, feedforward_channels=256)\n    assert block.ffn.embed_dims == 64\n    assert block.attn.num_heads == 4\n    assert block.ffn.feedforward_channels == 256\n    x = torch.randn(1, 56 * 56, 64)\n    x_out = block(x, (56, 56))\n    assert x_out.shape == torch.Size([1, 56 * 56, 64])\n\n\ndef test_pvt():\n    \"\"\"Test PVT backbone.\"\"\"\n\n    with pytest.raises(TypeError):\n        # Pretrained arg must be str or None.\n        PyramidVisionTransformer(pretrained=123)\n\n    # test pretrained image size\n    with pytest.raises(AssertionError):\n        PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))\n\n    # Test absolute position embedding\n    temp = torch.randn((1, 3, 224, 224))\n    model = PyramidVisionTransformer(\n        pretrain_img_size=224, use_abs_pos_embed=True)\n    model.init_weights()\n    model(temp)\n\n    # Test normal inference\n    temp = torch.randn((1, 3, 32, 32))\n    model = PyramidVisionTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 8, 8)\n    assert outs[1].shape == (1, 128, 4, 4)\n    assert outs[2].shape == (1, 320, 2, 2)\n    assert outs[3].shape == (1, 512, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 33, 33))\n    model = PyramidVisionTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 8, 8)\n    assert outs[1].shape == (1, 128, 4, 4)\n    assert outs[2].shape == (1, 320, 2, 2)\n    assert outs[3].shape == (1, 512, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 112, 137))\n    model = PyramidVisionTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 28, 34)\n    assert outs[1].shape == (1, 128, 14, 17)\n    assert outs[2].shape == (1, 320, 7, 8)\n    assert outs[3].shape == (1, 512, 3, 4)\n\n\ndef test_pvtv2():\n    \"\"\"Test PVTv2 backbone.\"\"\"\n\n    with pytest.raises(TypeError):\n        # Pretrained arg must be str or None.\n        PyramidVisionTransformerV2(pretrained=123)\n\n    # test pretrained image size\n    with pytest.raises(AssertionError):\n        PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))\n\n    # Test normal inference\n    temp = torch.randn((1, 3, 32, 32))\n    model = PyramidVisionTransformerV2()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 8, 8)\n    assert outs[1].shape == (1, 128, 4, 4)\n    assert outs[2].shape == (1, 320, 2, 2)\n    assert outs[3].shape == (1, 512, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 31, 31))\n    model = PyramidVisionTransformerV2()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 8, 8)\n    assert outs[1].shape == (1, 128, 4, 4)\n    assert outs[2].shape == (1, 320, 2, 2)\n    assert outs[3].shape == (1, 512, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 112, 137))\n    model = PyramidVisionTransformerV2()\n    outs = model(temp)\n    assert outs[0].shape == (1, 64, 28, 35)\n    assert outs[1].shape == (1, 128, 14, 18)\n    assert outs[2].shape == (1, 320, 7, 9)\n    assert outs[3].shape == (1, 512, 4, 5)\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_regnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones import RegNet\n\nregnet_test_data = [\n    ('regnetx_400mf',\n     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,\n          bot_mul=1.0), [32, 64, 160, 384]),\n    ('regnetx_800mf',\n     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,\n          bot_mul=1.0), [64, 128, 288, 672]),\n    ('regnetx_1.6gf',\n     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,\n          bot_mul=1.0), [72, 168, 408, 912]),\n    ('regnetx_3.2gf',\n     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,\n          bot_mul=1.0), [96, 192, 432, 1008]),\n    ('regnetx_4.0gf',\n     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,\n          bot_mul=1.0), [80, 240, 560, 1360]),\n    ('regnetx_6.4gf',\n     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,\n          bot_mul=1.0), [168, 392, 784, 1624]),\n    ('regnetx_8.0gf',\n     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,\n          bot_mul=1.0), [80, 240, 720, 1920]),\n    ('regnetx_12gf',\n     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,\n          bot_mul=1.0), [224, 448, 896, 2240]),\n]\n\n\n@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)\ndef test_regnet_backbone(arch_name, arch, out_channels):\n    with pytest.raises(AssertionError):\n        # ResNeXt depth should be in [50, 101, 152]\n        RegNet(arch_name + '233')\n\n    # Test RegNet with arch_name\n    model = RegNet(arch_name)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])\n    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])\n    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])\n    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])\n\n    # Test RegNet with arch\n    model = RegNet(arch)\n    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])\n    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])\n    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])\n    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_renext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones import ResNeXt\nfrom mmdet.models.backbones.resnext import Bottleneck as BottleneckX\nfrom .utils import is_block\n\n\ndef test_renext_bottleneck():\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')\n\n    # Test ResNeXt Bottleneck structure\n    block = BottleneckX(\n        64, 64, groups=32, base_width=4, stride=2, style='pytorch')\n    assert block.conv2.stride == (2, 2)\n    assert block.conv2.groups == 32\n    assert block.conv2.out_channels == 128\n\n    # Test ResNeXt Bottleneck with DCN\n    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n    with pytest.raises(AssertionError):\n        # conv_cfg must be None if dcn is not None\n        BottleneckX(\n            64,\n            64,\n            groups=32,\n            base_width=4,\n            dcn=dcn,\n            conv_cfg=dict(type='Conv'))\n    BottleneckX(64, 64, dcn=dcn)\n\n    # Test ResNeXt Bottleneck forward\n    block = BottleneckX(64, 16, groups=32, base_width=4)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test ResNeXt Bottleneck forward with plugins\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            stages=(False, False, True, True),\n            position='after_conv2')\n    ]\n    block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_resnext_backbone():\n    with pytest.raises(KeyError):\n        # ResNeXt depth should be in [50, 101, 152]\n        ResNeXt(depth=18)\n\n    # Test ResNeXt with group 32, base_width 4\n    model = ResNeXt(depth=50, groups=32, base_width=4)\n    for m in model.modules():\n        if is_block(m):\n            assert m.conv2.groups == 32\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 256, 8, 8])\n    assert feat[1].shape == torch.Size([1, 512, 4, 4])\n    assert feat[2].shape == torch.Size([1, 1024, 2, 2])\n    assert feat[3].shape == torch.Size([1, 2048, 1, 1])\n\n\nregnet_test_data = [\n    ('regnetx_400mf',\n     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,\n          bot_mul=1.0), [32, 64, 160, 384]),\n    ('regnetx_800mf',\n     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,\n          bot_mul=1.0), [64, 128, 288, 672]),\n    ('regnetx_1.6gf',\n     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,\n          bot_mul=1.0), [72, 168, 408, 912]),\n    ('regnetx_3.2gf',\n     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,\n          bot_mul=1.0), [96, 192, 432, 1008]),\n    ('regnetx_4.0gf',\n     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,\n          bot_mul=1.0), [80, 240, 560, 1360]),\n    ('regnetx_6.4gf',\n     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,\n          bot_mul=1.0), [168, 392, 784, 1624]),\n    ('regnetx_8.0gf',\n     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,\n          bot_mul=1.0), [80, 240, 720, 1920]),\n    ('regnetx_12gf',\n     dict(w0=168, 
wa=73.36, wm=2.37, group_w=112, depth=19,\n          bot_mul=1.0), [224, 448, 896, 2240]),\n]\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_res2net.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones import Res2Net\nfrom mmdet.models.backbones.res2net import Bottle2neck\nfrom .utils import is_block\n\n\ndef test_res2net_bottle2neck():\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')\n\n    with pytest.raises(AssertionError):\n        # Scale must be larger than 1\n        Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')\n\n    # Test Res2Net Bottle2neck structure\n    block = Bottle2neck(\n        64, 64, base_width=26, stride=2, scales=4, style='pytorch')\n    assert block.scales == 4\n\n    # Test Res2Net Bottle2neck with DCN\n    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n    with pytest.raises(AssertionError):\n        # conv_cfg must be None if dcn is not None\n        Bottle2neck(\n            64,\n            64,\n            base_width=26,\n            scales=4,\n            dcn=dcn,\n            conv_cfg=dict(type='Conv'))\n    Bottle2neck(64, 64, dcn=dcn)\n\n    # Test Res2Net Bottle2neck forward\n    block = Bottle2neck(64, 16, base_width=26, scales=4)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_res2net_backbone():\n    with pytest.raises(KeyError):\n        # Res2Net depth should be in [50, 101, 152]\n        Res2Net(depth=18)\n\n    # Test Res2Net with scales 4, base_width 26\n    model = Res2Net(depth=50, scales=4, base_width=26)\n    for m in model.modules():\n        if is_block(m):\n            assert m.scales == 4\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 256, 8, 8])\n    assert feat[1].shape == torch.Size([1, 512, 4, 4])\n    assert feat[2].shape == torch.Size([1, 1024, 2, 2])\n    assert feat[3].shape == torch.Size([1, 2048, 1, 1])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_resnest.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones import ResNeSt\nfrom mmdet.models.backbones.resnest import Bottleneck as BottleneckS\n\n\ndef test_resnest_bottleneck():\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')\n\n    # Test ResNeSt Bottleneck structure\n    block = BottleneckS(\n        2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')\n    assert block.avd_layer.stride == 2\n    assert block.conv2.channels == 4\n\n    # Test ResNeSt Bottleneck forward\n    block = BottleneckS(16, 4, radix=2, reduction_factor=4)\n    x = torch.randn(2, 16, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([2, 16, 56, 56])\n\n\ndef test_resnest_backbone():\n    with pytest.raises(KeyError):\n        # ResNeSt depth should be in [50, 101, 152, 200]\n        ResNeSt(depth=18)\n\n    # Test ResNeSt with radix 2, reduction_factor 4\n    model = ResNeSt(\n        depth=50,\n        base_channels=4,\n        radix=2,\n        reduction_factor=4,\n        out_indices=(0, 1, 2, 3))\n    model.train()\n\n    imgs = torch.randn(2, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([2, 16, 8, 8])\n    assert feat[1].shape == torch.Size([2, 32, 4, 4])\n    assert feat[2].shape == torch.Size([2, 64, 2, 2])\n    assert feat[3].shape == torch.Size([2, 128, 1, 1])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom mmcv.ops import DeformConv2dPack\nfrom torch.nn.modules import AvgPool2d, GroupNorm\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.backbones import ResNet, ResNetV1d\nfrom mmdet.models.backbones.resnet import BasicBlock, Bottleneck\nfrom mmdet.models.layers import ResLayer, SimplifiedBasicBlock\nfrom .utils import check_norm_state, is_block, is_norm\n\n\ndef assert_params_all_zeros(module) -> bool:\n    \"\"\"Check if the parameters of the module is all zeros.\n\n    Args:\n        module (nn.Module): The module to be checked.\n\n    Returns:\n        bool: Whether the parameters of the module is all zeros.\n    \"\"\"\n    weight_data = module.weight.data\n    is_weight_zero = weight_data.allclose(\n        weight_data.new_zeros(weight_data.size()))\n\n    if hasattr(module, 'bias') and module.bias is not None:\n        bias_data = module.bias.data\n        is_bias_zero = bias_data.allclose(\n            bias_data.new_zeros(bias_data.size()))\n    else:\n        is_bias_zero = True\n\n    return is_weight_zero and is_bias_zero\n\n\ndef test_resnet_basic_block():\n    with pytest.raises(AssertionError):\n        # Not implemented yet.\n        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n        BasicBlock(64, 64, dcn=dcn)\n\n    with pytest.raises(AssertionError):\n        # Not implemented yet.\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv3')\n        ]\n        BasicBlock(64, 64, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # Not implemented yet\n        plugins = [\n            dict(\n                cfg=dict(\n                    type='GeneralizedAttention',\n                    spatial_range=-1,\n                    num_heads=8,\n                    attention_type='0010',\n                    kv_stride=2),\n                position='after_conv2')\n        ]\n        BasicBlock(64, 64, plugins=plugins)\n\n    # test BasicBlock structure and forward\n    block = BasicBlock(64, 64)\n    assert block.conv1.in_channels == 64\n    assert block.conv1.out_channels == 64\n    assert block.conv1.kernel_size == (3, 3)\n    assert block.conv2.in_channels == 64\n    assert block.conv2.out_channels == 64\n    assert block.conv2.kernel_size == (3, 3)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test BasicBlock with checkpoint forward\n    block = BasicBlock(64, 64, with_cp=True)\n    assert block.with_cp\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_resnet_bottleneck():\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        Bottleneck(64, 64, style='tensorflow')\n\n    with pytest.raises(AssertionError):\n        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv4')\n        ]\n        Bottleneck(64, 16, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # Need to specify different postfix to avoid duplicate plugin name\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n                position='after_conv3'),\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv3')\n        ]\n        Bottleneck(64, 16, plugins=plugins)\n\n    with pytest.raises(KeyError):\n        # Plugin type is not supported\n        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]\n        Bottleneck(64, 16, plugins=plugins)\n\n    # Test Bottleneck with checkpoint forward\n    block = Bottleneck(64, 16, with_cp=True)\n    assert block.with_cp\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test Bottleneck style\n    block = Bottleneck(64, 64, stride=2, style='pytorch')\n    assert block.conv1.stride == (1, 1)\n    assert block.conv2.stride == (2, 2)\n    block = Bottleneck(64, 64, stride=2, style='caffe')\n    assert block.conv1.stride == (2, 2)\n    assert block.conv2.stride == (1, 1)\n\n    # Test Bottleneck DCN\n    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n    with pytest.raises(AssertionError):\n        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))\n    block = Bottleneck(64, 64, dcn=dcn)\n    assert isinstance(block.conv2, DeformConv2dPack)\n\n    # Test Bottleneck forward\n    block = Bottleneck(64, 16)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test Bottleneck with 1 ContextBlock after conv3\n    plugins = [\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16),\n            position='after_conv3')\n    ]\n    block = Bottleneck(64, 16, plugins=plugins)\n    assert block.context_block.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test Bottleneck with 1 GeneralizedAttention after conv2\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            position='after_conv2')\n    ]\n    block = Bottleneck(64, 16, plugins=plugins)\n    assert block.gen_attention_block.in_channels == 16\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n    # after conv2, 1 ContextBlock after conv3\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            position='after_conv2'),\n        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16),\n            position='after_conv3')\n    ]\n    block = Bottleneck(64, 16, plugins=plugins)\n    assert block.gen_attention_block.in_channels == 16\n    assert block.nonlocal_block.in_channels == 16\n    assert block.context_block.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after\n    # conv3\n    plugins = [\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=1),\n            position='after_conv2'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),\n            position='after_conv3'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),\n            position='after_conv3')\n    ]\n    block = Bottleneck(64, 16, plugins=plugins)\n    assert block.context_block1.in_channels == 16\n    assert block.context_block2.in_channels == 64\n    assert block.context_block3.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_simplied_basic_block():\n    with pytest.raises(AssertionError):\n        # Not implemented yet.\n        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n        SimplifiedBasicBlock(64, 64, dcn=dcn)\n\n    with pytest.raises(AssertionError):\n        # Not implemented yet.\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv3')\n        ]\n        SimplifiedBasicBlock(64, 64, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # Not implemented yet\n        plugins = [\n            dict(\n                cfg=dict(\n                    type='GeneralizedAttention',\n                    spatial_range=-1,\n                    num_heads=8,\n                    attention_type='0010',\n                    kv_stride=2),\n                position='after_conv2')\n        ]\n        SimplifiedBasicBlock(64, 64, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # Not implemented yet\n        SimplifiedBasicBlock(64, 64, with_cp=True)\n\n    # test SimplifiedBasicBlock structure and forward\n    block = SimplifiedBasicBlock(64, 64)\n    assert block.conv1.in_channels == 64\n    assert block.conv1.out_channels == 64\n    assert block.conv1.kernel_size == (3, 3)\n    assert block.conv2.in_channels == 64\n    assert block.conv2.out_channels == 64\n    assert block.conv2.kernel_size == (3, 3)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # test SimplifiedBasicBlock without norm\n    block = SimplifiedBasicBlock(64, 64, norm_cfg=None)\n    assert block.norm1 is None\n    assert block.norm2 is None\n    x_out = block(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n\ndef test_resnet_res_layer():\n    # Test ResLayer of 3 Bottleneck w\\o downsample\n    layer = ResLayer(Bottleneck, 64, 16, 3)\n    assert len(layer) == 3\n    assert layer[0].conv1.in_channels == 64\n    assert layer[0].conv1.out_channels == 16\n    for i in range(1, len(layer)):\n        assert layer[i].conv1.in_channels == 64\n        assert layer[i].conv1.out_channels == 16\n    for i in range(len(layer)):\n        assert layer[i].downsample is None\n    x = torch.randn(1, 64, 56, 56)\n    x_out = layer(x)\n    assert x_out.shape == torch.Size([1, 64, 56, 56])\n\n    # Test ResLayer of 3 Bottleneck with downsample\n    layer = ResLayer(Bottleneck, 64, 64, 3)\n    assert layer[0].downsample[0].out_channels == 256\n    for i in range(1, len(layer)):\n        assert layer[i].downsample is None\n    x = torch.randn(1, 64, 56, 56)\n    x_out = layer(x)\n    assert x_out.shape == torch.Size([1, 256, 56, 56])\n\n    # Test ResLayer of 3 Bottleneck with stride=2\n    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)\n    assert layer[0].downsample[0].out_channels == 256\n    assert 
layer[0].downsample[0].stride == (2, 2)\n    for i in range(1, len(layer)):\n        assert layer[i].downsample is None\n    x = torch.randn(1, 64, 56, 56)\n    x_out = layer(x)\n    assert x_out.shape == torch.Size([1, 256, 28, 28])\n\n    # Test ResLayer of 3 Bottleneck with stride=2 and average downsample\n    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)\n    assert isinstance(layer[0].downsample[0], AvgPool2d)\n    assert layer[0].downsample[1].out_channels == 256\n    assert layer[0].downsample[1].stride == (1, 1)\n    for i in range(1, len(layer)):\n        assert layer[i].downsample is None\n    x = torch.randn(1, 64, 56, 56)\n    x_out = layer(x)\n    assert x_out.shape == torch.Size([1, 256, 28, 28])\n\n    # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False\n    layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)\n    assert layer[2].downsample[0].out_channels == 64\n    assert layer[2].downsample[0].stride == (2, 2)\n    for i in range(len(layer) - 1):\n        assert layer[i].downsample is None\n    x = torch.randn(1, 64, 56, 56)\n    x_out = layer(x)\n    assert x_out.shape == torch.Size([1, 64, 28, 28])\n\n\ndef test_resnest_stem():\n    # Test default stem_channels\n    model = ResNet(50)\n    assert model.stem_channels == 64\n    assert model.conv1.out_channels == 64\n    assert model.norm1.num_features == 64\n\n    # Test default stem_channels, with base_channels=3\n    model = ResNet(50, base_channels=3)\n    assert model.stem_channels == 3\n    assert model.conv1.out_channels == 3\n    assert model.norm1.num_features == 3\n    assert model.layer1[0].conv1.in_channels == 3\n\n    # Test stem_channels=3\n    model = ResNet(50, stem_channels=3)\n    assert model.stem_channels == 3\n    assert model.conv1.out_channels == 3\n    assert model.norm1.num_features == 3\n    assert model.layer1[0].conv1.in_channels == 3\n\n    # Test stem_channels=3, with base_channels=2\n    model = ResNet(50, stem_channels=3, base_channels=2)\n    assert model.stem_channels == 3\n    assert model.conv1.out_channels == 3\n    assert model.norm1.num_features == 3\n    assert model.layer1[0].conv1.in_channels == 3\n\n    # Test V1d stem_channels\n    model = ResNetV1d(depth=50, stem_channels=6)\n    model.train()\n    assert model.stem[0].out_channels == 3\n    assert model.stem[1].num_features == 3\n    assert model.stem[3].out_channels == 3\n    assert model.stem[4].num_features == 3\n    assert model.stem[6].out_channels == 6\n    assert model.stem[7].num_features == 6\n    assert model.layer1[0].conv1.in_channels == 6\n\n\ndef test_resnet_backbone():\n    \"\"\"Test resnet backbone.\"\"\"\n    with pytest.raises(KeyError):\n        # ResNet depth should be in [18, 34, 50, 101, 152]\n        ResNet(20)\n\n    with pytest.raises(AssertionError):\n        # In ResNet: 1 <= num_stages <= 4\n        ResNet(50, num_stages=0)\n\n    with pytest.raises(AssertionError):\n        # len(stage_with_dcn) == num_stages\n        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)\n        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))\n\n    with pytest.raises(AssertionError):\n        # len(stage_with_plugin) == num_stages\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n                stages=(False, True, True),\n                position='after_conv3')\n        ]\n        ResNet(50, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # In ResNet: 1 <= num_stages <= 4\n        ResNet(50, num_stages=5)\n\n    with pytest.raises(AssertionError):\n        # len(strides) == len(dilations) == num_stages\n        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)\n\n    with pytest.raises(TypeError):\n        # pretrained must be a string path\n        model = ResNet(50, pretrained=0)\n\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        ResNet(50, style='tensorflow')\n\n    # Test ResNet50 norm_eval=True\n    model = ResNet(50, norm_eval=True, base_channels=1)\n    model.train()\n    assert check_norm_state(model.modules(), False)\n\n    # Test ResNet50 with torchvision pretrained weight\n    model = ResNet(\n        depth=50, norm_eval=True, pretrained='torchvision://resnet50')\n    model.train()\n    assert check_norm_state(model.modules(), False)\n\n    # Test ResNet50 with first stage frozen\n    frozen_stages = 1\n    model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)\n    model.train()\n    assert model.norm1.training is False\n    for layer in [model.conv1, model.norm1]:\n        for param in layer.parameters():\n            assert param.requires_grad is False\n    for i in range(1, frozen_stages + 1):\n        layer = getattr(model, f'layer{i}')\n        for mod in layer.modules():\n            if isinstance(mod, _BatchNorm):\n                assert mod.training is False\n        for param in layer.parameters():\n            assert param.requires_grad is False\n\n    # Test ResNet50V1d with first stage frozen\n    model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)\n    assert len(model.stem) == 9\n    model.train()\n    assert check_norm_state(model.stem, False)\n    for param in model.stem.parameters():\n        assert param.requires_grad is False\n    for i in range(1, frozen_stages + 1):\n        layer = getattr(model, f'layer{i}')\n        for mod in layer.modules():\n            if isinstance(mod, _BatchNorm):\n                assert mod.training is False\n        for param in layer.parameters():\n            assert param.requires_grad is False\n\n    # Test ResNet18 forward\n    model = ResNet(18)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 64, 8, 8])\n    assert feat[1].shape == torch.Size([1, 128, 4, 4])\n    assert feat[2].shape == torch.Size([1, 256, 2, 2])\n    assert feat[3].shape == torch.Size([1, 512, 1, 1])\n\n    # Test ResNet18 with checkpoint forward\n    model = ResNet(18, with_cp=True)\n    for m in model.modules():\n        if is_block(m):\n            assert m.with_cp\n\n    # Test ResNet50 with BatchNorm forward\n    model = ResNet(50, base_channels=1)\n    for m in model.modules():\n        if is_norm(m):\n            assert isinstance(m, _BatchNorm)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 4, 8, 8])\n    assert feat[1].shape == torch.Size([1, 8, 4, 4])\n    assert feat[2].shape == torch.Size([1, 16, 2, 2])\n    assert feat[3].shape == torch.Size([1, 32, 1, 1])\n\n    # Test ResNet50 with layers 1, 2, 3 out forward\n    model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)\n    
model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 3\n    assert feat[0].shape == torch.Size([1, 4, 8, 8])\n    assert feat[1].shape == torch.Size([1, 8, 4, 4])\n    assert feat[2].shape == torch.Size([1, 16, 2, 2])\n\n    # Test ResNet50 with checkpoint forward\n    model = ResNet(50, with_cp=True, base_channels=1)\n    for m in model.modules():\n        if is_block(m):\n            assert m.with_cp\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 4, 8, 8])\n    assert feat[1].shape == torch.Size([1, 8, 4, 4])\n    assert feat[2].shape == torch.Size([1, 16, 2, 2])\n    assert feat[3].shape == torch.Size([1, 32, 1, 1])\n\n    # Test ResNet50 with GroupNorm forward\n    model = ResNet(\n        50,\n        base_channels=4,\n        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))\n    for m in model.modules():\n        if is_norm(m):\n            assert isinstance(m, GroupNorm)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 16, 8, 8])\n    assert feat[1].shape == torch.Size([1, 32, 4, 4])\n    assert feat[2].shape == torch.Size([1, 64, 2, 2])\n    assert feat[3].shape == torch.Size([1, 128, 1, 1])\n\n    # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n    # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            stages=(False, True, True, True),\n            position='after_conv2'),\n        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n            stages=(False, True, True, False),\n            position='after_conv3')\n    ]\n    model = ResNet(50, plugins=plugins, base_channels=8)\n    for m in model.layer1.modules():\n        if is_block(m):\n            assert not hasattr(m, 'context_block')\n            assert not hasattr(m, 'gen_attention_block')\n            assert m.nonlocal_block.in_channels == 8\n    for m in model.layer2.modules():\n        if is_block(m):\n            assert m.nonlocal_block.in_channels == 16\n            assert m.gen_attention_block.in_channels == 16\n            assert m.context_block.in_channels == 64\n\n    for m in model.layer3.modules():\n        if is_block(m):\n            assert m.nonlocal_block.in_channels == 32\n            assert m.gen_attention_block.in_channels == 32\n            assert m.context_block.in_channels == 128\n\n    for m in model.layer4.modules():\n        if is_block(m):\n            assert m.nonlocal_block.in_channels == 64\n            assert m.gen_attention_block.in_channels == 64\n            assert not hasattr(m, 'context_block')\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 32, 8, 8])\n    assert feat[1].shape == torch.Size([1, 64, 4, 4])\n    assert feat[2].shape == torch.Size([1, 128, 2, 2])\n    assert feat[3].shape == torch.Size([1, 256, 1, 1])\n\n    # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after\n    # conv3 in layers 2, 3, 4\n    plugins = [\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),\n            stages=(False, True, True, False),\n            position='after_conv3'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),\n            stages=(False, True, True, False),\n            position='after_conv3')\n    ]\n\n    model = ResNet(50, plugins=plugins, base_channels=8)\n    for m in model.layer1.modules():\n        if is_block(m):\n            assert not hasattr(m, 'context_block')\n            assert not hasattr(m, 'context_block1')\n            assert not hasattr(m, 'context_block2')\n    for m in model.layer2.modules():\n        if is_block(m):\n            assert not hasattr(m, 'context_block')\n            assert m.context_block1.in_channels == 64\n            assert m.context_block2.in_channels == 64\n\n    for m in model.layer3.modules():\n        if is_block(m):\n            assert not hasattr(m, 'context_block')\n            assert m.context_block1.in_channels == 128\n            assert m.context_block2.in_channels == 128\n\n    for m in model.layer4.modules():\n        if is_block(m):\n            assert not hasattr(m, 'context_block')\n            assert not hasattr(m, 'context_block1')\n            assert not hasattr(m, 'context_block2')\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 32, 8, 8])\n    assert feat[1].shape == torch.Size([1, 64, 4, 4])\n    assert feat[2].shape == torch.Size([1, 128, 2, 2])\n    assert feat[3].shape == torch.Size([1, 256, 1, 1])\n\n    # Test ResNet50 zero initialization of residual\n    model = ResNet(50, zero_init_residual=True, base_channels=1)\n    model.init_weights()\n    for m in model.modules():\n        if isinstance(m, Bottleneck):\n            assert assert_params_all_zeros(m.norm3)\n        elif isinstance(m, BasicBlock):\n            assert assert_params_all_zeros(m.norm2)\n    
model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 4, 8, 8])\n    assert feat[1].shape == torch.Size([1, 8, 4, 4])\n    assert feat[2].shape == torch.Size([1, 16, 2, 2])\n    assert feat[3].shape == torch.Size([1, 32, 1, 1])\n\n    # Test ResNetV1d forward\n    model = ResNetV1d(depth=50, base_channels=2)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 4\n    assert feat[0].shape == torch.Size([1, 8, 8, 8])\n    assert feat[1].shape == torch.Size([1, 16, 4, 4])\n    assert feat[2].shape == torch.Size([1, 32, 2, 2])\n    assert feat[3].shape == torch.Size([1, 64, 1, 1])\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_swin.py",
    "content": "import pytest\nimport torch\n\nfrom mmdet.models.backbones.swin import SwinBlock, SwinTransformer\n\n\ndef test_swin_block():\n    # test SwinBlock structure and forward\n    block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)\n    assert block.ffn.embed_dims == 64\n    assert block.attn.w_msa.num_heads == 4\n    assert block.ffn.feedforward_channels == 256\n    x = torch.randn(1, 56 * 56, 64)\n    x_out = block(x, (56, 56))\n    assert x_out.shape == torch.Size([1, 56 * 56, 64])\n\n    # Test BasicBlock with checkpoint forward\n    block = SwinBlock(\n        embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)\n    assert block.with_cp\n    x = torch.randn(1, 56 * 56, 64)\n    x_out = block(x, (56, 56))\n    assert x_out.shape == torch.Size([1, 56 * 56, 64])\n\n\ndef test_swin_transformer():\n    \"\"\"Test Swin Transformer backbone.\"\"\"\n\n    with pytest.raises(TypeError):\n        # Pretrained arg must be str or None.\n        SwinTransformer(pretrained=123)\n\n    with pytest.raises(AssertionError):\n        # Because swin uses non-overlapping patch embed, so the stride of patch\n        # embed must be equal to patch size.\n        SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)\n\n    # test pretrained image size\n    with pytest.raises(AssertionError):\n        SwinTransformer(pretrain_img_size=(224, 224, 224))\n\n    # Test absolute position embedding\n    temp = torch.randn((1, 3, 224, 224))\n    model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)\n    model.init_weights()\n    model(temp)\n\n    # Test patch norm\n    model = SwinTransformer(patch_norm=False)\n    model(temp)\n\n    # Test normal inference\n    temp = torch.randn((1, 3, 32, 32))\n    model = SwinTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 96, 8, 8)\n    assert outs[1].shape == (1, 192, 4, 4)\n    assert outs[2].shape == (1, 384, 2, 2)\n    assert outs[3].shape == (1, 768, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 31, 31))\n    model = SwinTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 96, 8, 8)\n    assert outs[1].shape == (1, 192, 4, 4)\n    assert outs[2].shape == (1, 384, 2, 2)\n    assert outs[3].shape == (1, 768, 1, 1)\n\n    # Test abnormal inference size\n    temp = torch.randn((1, 3, 112, 137))\n    model = SwinTransformer()\n    outs = model(temp)\n    assert outs[0].shape == (1, 96, 28, 35)\n    assert outs[1].shape == (1, 192, 14, 18)\n    assert outs[2].shape == (1, 384, 7, 9)\n    assert outs[3].shape == (1, 768, 4, 5)\n\n    model = SwinTransformer(frozen_stages=4)\n    model.train()\n    for p in model.parameters():\n        assert not p.requires_grad\n"
  },
  {
    "path": "tests/test_models/test_backbones/test_trident_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.backbones import TridentResNet\nfrom mmdet.models.backbones.trident_resnet import TridentBottleneck\n\n\ndef test_trident_resnet_bottleneck():\n    trident_dilations = (1, 2, 3)\n    test_branch_idx = 1\n    concat_output = True\n    trident_build_config = (trident_dilations, test_branch_idx, concat_output)\n\n    with pytest.raises(AssertionError):\n        # Style must be in ['pytorch', 'caffe']\n        TridentBottleneck(\n            *trident_build_config, inplanes=64, planes=64, style='tensorflow')\n\n    with pytest.raises(AssertionError):\n        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv4')\n        ]\n        TridentBottleneck(\n            *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n    with pytest.raises(AssertionError):\n        # Need to specify different postfix to avoid duplicate plugin name\n        plugins = [\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv3'),\n            dict(\n                cfg=dict(type='ContextBlock', ratio=1. / 16),\n                position='after_conv3')\n        ]\n        TridentBottleneck(\n            *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n    with pytest.raises(KeyError):\n        # Plugin type is not supported\n        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]\n        TridentBottleneck(\n            *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n\n    # Test Bottleneck with checkpoint forward\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=16, with_cp=True)\n    assert block.with_cp\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n    # Test Bottleneck style\n    block = TridentBottleneck(\n        *trident_build_config,\n        inplanes=64,\n        planes=64,\n        stride=2,\n        style='pytorch')\n    assert block.conv1.stride == (1, 1)\n    assert block.conv2.stride == (2, 2)\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')\n    assert block.conv1.stride == (2, 2)\n    assert block.conv2.stride == (1, 1)\n\n    # Test Bottleneck forward\n    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n    # Test Bottleneck with 1 ContextBlock after conv3\n    plugins = [\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. 
/ 16),\n            position='after_conv3')\n    ]\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n    assert block.context_block.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n    # Test Bottleneck with 1 GeneralizedAttention after conv2\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            position='after_conv2')\n    ]\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n    assert block.gen_attention_block.in_channels == 16\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D\n    # after conv2, 1 ContextBlock after conv3\n    plugins = [\n        dict(\n            cfg=dict(\n                type='GeneralizedAttention',\n                spatial_range=-1,\n                num_heads=8,\n                attention_type='0010',\n                kv_stride=2),\n            position='after_conv2'),\n        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16),\n            position='after_conv3')\n    ]\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n    assert block.gen_attention_block.in_channels == 16\n    assert block.nonlocal_block.in_channels == 16\n    assert block.context_block.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after\n    # conv3\n    plugins = [\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),\n            position='after_conv2'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),\n            position='after_conv3'),\n        dict(\n            cfg=dict(type='ContextBlock', ratio=1. 
/ 16, postfix=3),\n            position='after_conv3')\n    ]\n    block = TridentBottleneck(\n        *trident_build_config, inplanes=64, planes=16, plugins=plugins)\n    assert block.context_block1.in_channels == 16\n    assert block.context_block2.in_channels == 64\n    assert block.context_block3.in_channels == 64\n    x = torch.randn(1, 64, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])\n\n\ndef test_trident_resnet_backbone():\n    tridentresnet_config = dict(\n        num_branch=3,\n        test_branch_idx=1,\n        strides=(1, 2, 2),\n        dilations=(1, 1, 1),\n        trident_dilations=(1, 2, 3),\n        out_indices=(2, ),\n    )\n    \"\"\"Test tridentresnet backbone.\"\"\"\n    with pytest.raises(AssertionError):\n        # TridentResNet depth should be in [50, 101, 152]\n        TridentResNet(18, **tridentresnet_config)\n\n    with pytest.raises(AssertionError):\n        # In TridentResNet: num_stages == 3\n        TridentResNet(50, num_stages=4, **tridentresnet_config)\n\n    model = TridentResNet(50, num_stages=3, **tridentresnet_config)\n    model.train()\n\n    imgs = torch.randn(1, 3, 32, 32)\n    feat = model(imgs)\n    assert len(feat) == 1\n    assert feat[0].shape == torch.Size([3, 1024, 2, 2])\n"
  },
  {
    "path": "tests/test_models/test_backbones/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom torch.nn.modules import GroupNorm\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.backbones.res2net import Bottle2neck\nfrom mmdet.models.backbones.resnet import BasicBlock, Bottleneck\nfrom mmdet.models.backbones.resnext import Bottleneck as BottleneckX\nfrom mmdet.models.layers import SimplifiedBasicBlock\n\n\ndef is_block(modules):\n    \"\"\"Check if is ResNet building block.\"\"\"\n    if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,\n                            SimplifiedBasicBlock)):\n        return True\n    return False\n\n\ndef is_norm(modules):\n    \"\"\"Check if is one of the norms.\"\"\"\n    if isinstance(modules, (GroupNorm, _BatchNorm)):\n        return True\n    return False\n\n\ndef check_norm_state(modules, train_state):\n    \"\"\"Check if norm layer is in correct train state.\"\"\"\n    for mod in modules:\n        if isinstance(mod, _BatchNorm):\n            if mod.training != train_state:\n                return False\n    return True\n"
  },
  {
    "path": "tests/test_models/test_data_preprocessors/test_batch_resize.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nfrom mmdet.models.data_preprocessors import BatchResize, DetDataPreprocessor\nfrom mmdet.testing import demo_mm_inputs\n\n\nclass TestDetDataPreprocessor(TestCase):\n\n    def test_batch_resize(self):\n\n        processor = DetDataPreprocessor(\n            mean=[103.53, 116.28, 123.675],\n            std=[57.375, 57.12, 58.395],\n            bgr_to_rgb=False,\n            batch_augments=[\n                dict(type='BatchResize', scale=(32, 32), pad_size_divisor=32)\n            ])\n        self.assertTrue(isinstance(processor.batch_augments[0], BatchResize))\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 10, 11], [3, 9, 24]], use_box_type=True)\n        data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']\n        self.assertEqual(batch_inputs.shape[-2:], (32, 32))\n        self.assertEqual(batch_data_samples[0].scale_factor,\n                         batch_data_samples[1].scale_factor)\n"
  },
  {
    "path": "tests/test_models/test_data_preprocessors/test_boxinst_preprocessor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\n\nfrom mmdet.models.data_preprocessors import BoxInstDataPreprocessor\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs\n\n\nclass TestBoxInstDataPreprocessor(TestCase):\n\n    def test_forward(self):\n        processor = BoxInstDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])\n\n        data = {\n            'inputs': [torch.randint(0, 256, (3, 256, 256))],\n            'data_samples': [DetDataSample()]\n        }\n\n        # Test evaluation mode\n        out_data = processor(data)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n\n        self.assertEqual(batch_inputs.shape, (1, 3, 256, 256))\n        self.assertEqual(len(batch_data_samples), 1)\n\n        # Test traning mode without gt bboxes\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 256, 256], [3, 128, 128]], num_items=[0, 0])\n        out_data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n\n        self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))\n        self.assertEqual(len(batch_data_samples), 2)\n        self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 0)\n        self.assertEqual(\n            len(batch_data_samples[0].gt_instances.pairwise_masks), 0)\n        self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 0)\n        self.assertEqual(\n            len(batch_data_samples[1].gt_instances.pairwise_masks), 0)\n\n        # Test traning mode with gt bboxes\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 256, 256], [3, 128, 128]], num_items=[2, 1])\n        out_data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n\n        self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))\n        self.assertEqual(len(batch_data_samples), 2)\n        self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 2)\n        self.assertEqual(\n            len(batch_data_samples[0].gt_instances.pairwise_masks), 2)\n        self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 1)\n        self.assertEqual(\n            len(batch_data_samples[1].gt_instances.pairwise_masks), 1)\n"
  },
  {
    "path": "tests/test_models/test_data_preprocessors/test_data_preprocessor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.logging import MessageHub\n\nfrom mmdet.models.data_preprocessors import (BatchFixedSizePad,\n                                             BatchSyncRandomResize,\n                                             DetDataPreprocessor,\n                                             MultiBranchDataPreprocessor)\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs\n\n\nclass TestDetDataPreprocessor(TestCase):\n\n    def test_init(self):\n        # test mean is None\n        processor = DetDataPreprocessor()\n        self.assertTrue(not hasattr(processor, 'mean'))\n        self.assertTrue(processor._enable_normalize is False)\n\n        # test mean is not None\n        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])\n        self.assertTrue(hasattr(processor, 'mean'))\n        self.assertTrue(hasattr(processor, 'std'))\n        self.assertTrue(processor._enable_normalize)\n\n        # please specify both mean and std\n        with self.assertRaises(AssertionError):\n            DetDataPreprocessor(mean=[0, 0, 0])\n\n        # bgr2rgb and rgb2bgr cannot be set to True at the same time\n        with self.assertRaises(AssertionError):\n            DetDataPreprocessor(bgr_to_rgb=True, rgb_to_bgr=True)\n\n    def test_forward(self):\n        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])\n\n        data = {\n            'inputs': [torch.randint(0, 256, (3, 11, 10))],\n            'data_samples': [DetDataSample()]\n        }\n        out_data = processor(data)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n\n        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))\n        self.assertEqual(len(batch_data_samples), 1)\n\n        # test channel_conversion\n        processor = DetDataPreprocessor(\n            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)\n        out_data = processor(data)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))\n        self.assertEqual(len(batch_data_samples), 1)\n\n        # test padding\n        data = {\n            'inputs': [\n                torch.randint(0, 256, (3, 10, 11)),\n                torch.randint(0, 256, (3, 9, 14))\n            ]\n        }\n        processor = DetDataPreprocessor(\n            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)\n        out_data = processor(data)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n        self.assertEqual(batch_inputs.shape, (2, 3, 10, 14))\n        self.assertIsNone(batch_data_samples)\n\n        # test pad_size_divisor\n        data = {\n            'inputs': [\n                torch.randint(0, 256, (3, 10, 11)),\n                torch.randint(0, 256, (3, 9, 24))\n            ],\n            'data_samples': [DetDataSample()] * 2\n        }\n        processor = DetDataPreprocessor(\n            mean=[0., 0., 0.], std=[1., 1., 1.], pad_size_divisor=5)\n        out_data = processor(data)\n        batch_inputs, batch_data_samples = out_data['inputs'], out_data[\n            'data_samples']\n        self.assertEqual(batch_inputs.shape, (2, 3, 10, 25))\n        self.assertEqual(len(batch_data_samples), 2)\n        for data_samples, expected_shape in zip(batch_data_samples,\n                      
                          [(10, 15), (10, 25)]):\n            self.assertEqual(data_samples.pad_shape, expected_shape)\n\n        # test pad_mask=True and pad_seg=True\n        processor = DetDataPreprocessor(\n            pad_mask=True, mask_pad_value=0, pad_seg=True, seg_pad_value=0)\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 10, 11], [3, 9, 24]],\n            with_mask=True,\n            with_semantic=True,\n            use_box_type=True)\n        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 10, 11))\n        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 9, 24))\n        mask_pad_sums = [\n            x.gt_instances.masks.masks.sum()\n            for x in packed_inputs['data_samples']\n        ]\n        seg_pad_sums = [\n            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']\n        ]\n        batch_data_samples = processor(\n            packed_inputs, training=True)['data_samples']\n        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(\n                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,\n                seg_pad_sums):\n            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),\n                             mask_pad_sum)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),\n                             seg_pad_sum)\n\n    def test_batch_sync_random_resize(self):\n        processor = DetDataPreprocessor(batch_augments=[\n            dict(\n                type='BatchSyncRandomResize',\n                random_size_range=(320, 320),\n                size_divisor=32,\n                interval=1)\n        ])\n        self.assertTrue(\n            isinstance(processor.batch_augments[0], BatchSyncRandomResize))\n        message_hub = MessageHub.get_instance('test_batch_sync_random_resize')\n        message_hub.update_info('iter', 0)\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)\n        batch_inputs = processor(packed_inputs, training=True)['inputs']\n        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))\n\n        # resize after one iter\n        message_hub.update_info('iter', 1)\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)\n        batch_inputs = processor(packed_inputs, training=True)['inputs']\n        self.assertEqual(batch_inputs.shape, (2, 3, 320, 320))\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)\n        batch_inputs = processor(packed_inputs, training=False)['inputs']\n        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))\n\n    def test_batch_fixed_size_pad(self):\n        # test pad_mask=False and pad_seg=False\n        processor = DetDataPreprocessor(\n            pad_mask=False,\n            pad_seg=False,\n            batch_augments=[\n                dict(\n                    type='BatchFixedSizePad',\n                    size=(32, 32),\n                    img_pad_value=0,\n                    pad_mask=True,\n                    mask_pad_value=0,\n                    pad_seg=True,\n                    
seg_pad_value=0)\n            ])\n        self.assertTrue(\n            isinstance(processor.batch_augments[0], BatchFixedSizePad))\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 10, 11], [3, 9, 24]],\n            with_mask=True,\n            with_semantic=True,\n            use_box_type=True)\n        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 10, 11))\n        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 9, 24))\n        mask_pad_sums = [\n            x.gt_instances.masks.masks.sum()\n            for x in packed_inputs['data_samples']\n        ]\n        seg_pad_sums = [\n            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']\n        ]\n        data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']\n        self.assertEqual(batch_inputs.shape[-2:], (32, 32))\n        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(\n                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,\n                seg_pad_sums):\n            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),\n                             mask_pad_sum)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),\n                             seg_pad_sum)\n\n        # test pad_mask=True and pad_seg=True\n        processor = DetDataPreprocessor(\n            pad_mask=True,\n            pad_seg=True,\n            seg_pad_value=0,\n            mask_pad_value=0,\n            batch_augments=[\n                dict(\n                    type='BatchFixedSizePad',\n                    size=(32, 32),\n                    img_pad_value=0,\n                    pad_mask=True,\n                    mask_pad_value=0,\n                    pad_seg=True,\n                    seg_pad_value=0)\n            ])\n        self.assertTrue(\n            isinstance(processor.batch_augments[0], BatchFixedSizePad))\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 10, 11], [3, 9, 24]],\n            with_mask=True,\n            with_semantic=True,\n            use_box_type=True)\n        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 10, 11))\n        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 9, 24))\n        mask_pad_sums = [\n            x.gt_instances.masks.masks.sum()\n            for x in packed_inputs['data_samples']\n        ]\n        seg_pad_sums = [\n            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']\n        ]\n        data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']\n        self.assertEqual(batch_inputs.shape[-2:], (32, 32))\n        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(\n                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,\n                seg_pad_sums):\n            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],\n                             
expected_shape)\n            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),\n                             mask_pad_sum)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),\n                             seg_pad_sum)\n\n        # test negative pad/no pad\n        processor = DetDataPreprocessor(\n            pad_mask=True,\n            pad_seg=True,\n            seg_pad_value=0,\n            mask_pad_value=0,\n            batch_augments=[\n                dict(\n                    type='BatchFixedSizePad',\n                    size=(5, 5),\n                    img_pad_value=0,\n                    pad_mask=True,\n                    mask_pad_value=1,\n                    pad_seg=True,\n                    seg_pad_value=1)\n            ])\n        self.assertTrue(\n            isinstance(processor.batch_augments[0], BatchFixedSizePad))\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 10, 11], [3, 9, 24]],\n            with_mask=True,\n            with_semantic=True,\n            use_box_type=True)\n        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 10, 11))\n        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(\n            0, 256, (1, 9, 24))\n        mask_pad_sums = [\n            x.gt_instances.masks.masks.sum()\n            for x in packed_inputs['data_samples']\n        ]\n        seg_pad_sums = [\n            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']\n        ]\n        data = processor(packed_inputs, training=True)\n        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']\n        self.assertEqual(batch_inputs.shape[-2:], (10, 24))\n        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(\n                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,\n                seg_pad_sums):\n            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],\n                             expected_shape)\n            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),\n                             mask_pad_sum)\n            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),\n                             seg_pad_sum)\n\n\nclass TestMultiBranchDataPreprocessor(TestCase):\n\n    def setUp(self):\n        \"\"\"Setup the model and optimizer which are used in every test method.\n\n        TestCase calls functions in this order: setUp() -> testMethod() ->\n        tearDown() -> cleanUp()\n        \"\"\"\n        self.data_preprocessor = dict(\n            type='DetDataPreprocessor',\n            mean=[123.675, 116.28, 103.53],\n            std=[58.395, 57.12, 57.375],\n            bgr_to_rgb=True,\n            pad_size_divisor=32)\n        self.multi_data = {\n            'inputs': {\n                'sup': [torch.randint(0, 256, (3, 224, 224))],\n                'unsup_teacher': [\n                    torch.randint(0, 256, (3, 400, 600)),\n                    torch.randint(0, 256, (3, 600, 400))\n                ],\n                'unsup_student': [\n                    torch.randint(0, 256, (3, 700, 500)),\n                    torch.randint(0, 256, (3, 500, 700))\n                ]\n            },\n            'data_samples': {\n                'sup': [DetDataSample()],\n                'unsup_teacher': [DetDataSample(),\n                                  
DetDataSample()],\n                'unsup_student': [DetDataSample(),\n                                  DetDataSample()],\n            }\n        }\n        self.data = {\n            'inputs': [torch.randint(0, 256, (3, 224, 224))],\n            'data_samples': [DetDataSample()]\n        }\n\n    def test_multi_data_preprocessor(self):\n        processor = MultiBranchDataPreprocessor(self.data_preprocessor)\n        # test processing multi_data when training\n        multi_data = processor(self.multi_data, training=True)\n        self.assertEqual(multi_data['inputs']['sup'].shape, (1, 3, 224, 224))\n        self.assertEqual(multi_data['inputs']['unsup_teacher'].shape,\n                         (2, 3, 608, 608))\n        self.assertEqual(multi_data['inputs']['unsup_student'].shape,\n                         (2, 3, 704, 704))\n        self.assertEqual(len(multi_data['data_samples']['sup']), 1)\n        self.assertEqual(len(multi_data['data_samples']['unsup_teacher']), 2)\n        self.assertEqual(len(multi_data['data_samples']['unsup_student']), 2)\n        # test processing data when testing\n        data = processor(self.data)\n        self.assertEqual(data['inputs'].shape, (1, 3, 224, 224))\n        self.assertEqual(len(data['data_samples']), 1)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import AnchorHead\n\n\nclass TestAnchorHead(TestCase):\n\n    def test_anchor_head_loss(self):\n        \"\"\"Tests anchor head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.3,\n                    min_pos_iou=0.3,\n                    match_low_quality=True,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=256,\n                    pos_fraction=0.5,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=False),\n                allowed_border=0,\n                pos_weight=-1,\n                debug=False))\n        anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)\n\n        # Anchor head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))\n            for i in range(len(anchor_head.prior_generator.strides)))\n        cls_scores, bbox_preds = anchor_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                   [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'\n        assert empty_box_loss.item() == 0, (\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'\n        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_atss_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import ATSSHead\n\n\nclass TestATSSHead(TestCase):\n\n    def test_atss_head_loss(self):\n        \"\"\"Tests atss head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        atss_head = ATSSHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            feat_channels=1,\n            norm_cfg=None,\n            train_cfg=cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [8, 16, 32, 64, 128]\n        ]\n        cls_scores, bbox_preds, centernesses = atss_head.forward(feat)\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 centernesses, [gt_instances],\n                                                 img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_centerness_loss.item(), 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,\n                                               centernesses, [gt_instances],\n                                               img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])\n        
self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_centerness_loss.item(), 0,\n                           'centerness loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_autoassign_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import AutoAssignHead\n\n\nclass TestAutoAssignHead(TestCase):\n\n    def test_autoassign_head_loss(self):\n        \"\"\"Tests autoassign head loss when truth is empty and non-empty.\"\"\"\n        s = 300\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        autoassign_head = AutoAssignHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            feat_channels=1,\n            strides=[8, 16, 32, 64, 128],\n            loss_bbox=dict(type='GIoULoss', loss_weight=5.0),\n            norm_cfg=None)\n\n        # Fcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in autoassign_head.prior_generator.strides)\n        cls_scores, bbox_preds, centernesses = autoassign_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,\n                                                       centernesses,\n                                                       [gt_instances],\n                                                       img_metas)\n        # When there is no truth, the neg loss should be nonzero but\n        # pos loss and center loss should be zero\n        empty_pos_loss = empty_gt_losses['loss_pos'].item()\n        empty_neg_loss = empty_gt_losses['loss_neg'].item()\n        empty_ctr_loss = empty_gt_losses['loss_center'].item()\n        self.assertGreater(empty_neg_loss, 0, 'neg loss should be non-zero')\n        self.assertEqual(\n            empty_pos_loss, 0,\n            'there should be no pos loss when there are no true boxes')\n        self.assertEqual(\n            empty_ctr_loss, 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then all pos, neg loss and center loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,\n                                                     centernesses,\n                                                     [gt_instances], img_metas)\n        onegt_pos_loss = one_gt_losses['loss_pos'].item()\n        onegt_neg_loss = one_gt_losses['loss_neg'].item()\n        onegt_ctr_loss = one_gt_losses['loss_center'].item()\n        self.assertGreater(onegt_pos_loss, 0, 'pos loss should be non-zero')\n        self.assertGreater(onegt_neg_loss, 0, 'neg loss should be non-zero')\n        self.assertGreater(onegt_ctr_loss, 0, 'center loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_boxinst_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine import MessageHub\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import BoxInstBboxHead, BoxInstMaskHead\nfrom mmdet.structures.mask import BitmapMasks\n\n\ndef _rand_masks(num_items, bboxes, img_w, img_h):\n    rng = np.random.RandomState(0)\n    masks = np.zeros((num_items, img_h, img_w), dtype=np.float32)\n    for i, bbox in enumerate(bboxes):\n        bbox = bbox.astype(np.int32)\n        mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >\n                0.3).astype(np.int64)\n        masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask\n    return BitmapMasks(masks, height=img_h, width=img_w)\n\n\ndef _fake_mask_feature_head():\n    mask_feature_head = ConfigDict(\n        in_channels=1,\n        feat_channels=1,\n        start_level=0,\n        end_level=2,\n        out_channels=8,\n        mask_stride=8,\n        num_stacked_convs=4,\n        norm_cfg=dict(type='BN', requires_grad=True))\n    return mask_feature_head\n\n\nclass TestBoxInstHead(TestCase):\n\n    def test_boxinst_maskhead_loss(self):\n        \"\"\"Tests boxinst maskhead loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        boxinst_bboxhead = BoxInstBboxHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            norm_cfg=None)\n\n        mask_feature_head = _fake_mask_feature_head()\n        boxinst_maskhead = BoxInstMaskHead(\n            mask_feature_head=mask_feature_head,\n            loss_mask=dict(\n                type='DiceLoss',\n                use_sigmoid=True,\n                activate=True,\n                eps=5e-6,\n                loss_weight=1.0))\n\n        # Fcos head expects a multiple levels of features per image\n        feats = []\n        for i in range(len(boxinst_bboxhead.strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))\n        feats = tuple(feats)\n        cls_scores, bbox_preds, centernesses, param_preds =\\\n            boxinst_bboxhead.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n        gt_instances.pairwise_masks = _rand_masks(\n            0, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(\n                dtype=torch.float32,\n                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)\n        message_hub = MessageHub.get_instance('runtime_info')\n        message_hub.update_info('iter', 1)\n        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,\n                                          param_preds, [gt_instances],\n                                          img_metas)\n        # When truth is empty then all mask loss\n        # should be zero for random inputs\n        positive_infos = boxinst_bboxhead.get_positive_infos()\n        mask_outs = boxinst_maskhead.forward(feats, positive_infos)\n        empty_gt_mask_losses = 
boxinst_maskhead.loss_by_feat(\n            *mask_outs, [gt_instances], img_metas, positive_infos)\n        loss_mask_project = empty_gt_mask_losses['loss_mask_project']\n        loss_mask_pairwise = empty_gt_mask_losses['loss_mask_pairwise']\n        self.assertEqual(loss_mask_project, 0,\n                         'mask project loss should be zero')\n        self.assertEqual(loss_mask_pairwise, 0,\n                         'mask pairwise loss should be zero')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor([[0.111, 0.222, 25.6667, 29.8757]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n        gt_instances.pairwise_masks = _rand_masks(\n            1, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(\n                dtype=torch.float32,\n                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)\n\n        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,\n                                          param_preds, [gt_instances],\n                                          img_metas)\n        positive_infos = boxinst_bboxhead.get_positive_infos()\n        mask_outs = boxinst_maskhead.forward(feats, positive_infos)\n        one_gt_mask_losses = boxinst_maskhead.loss_by_feat(\n            *mask_outs, [gt_instances], img_metas, positive_infos)\n        loss_mask_project = one_gt_mask_losses['loss_mask_project']\n        loss_mask_pairwise = one_gt_mask_losses['loss_mask_pairwise']\n        self.assertGreater(loss_mask_project, 0,\n                           'mask project loss should be nonzero')\n        self.assertGreater(loss_mask_pairwise, 0,\n                           'mask pairwise loss should be nonzero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_cascade_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import CascadeRPNHead\nfrom mmdet.structures import DetDataSample\n\nrpn_weight = 0.7\ncascade_rpn_config = ConfigDict(\n    dict(\n        num_stages=2,\n        num_classes=1,\n        stages=[\n            dict(\n                type='StageCascadeRPNHead',\n                in_channels=1,\n                feat_channels=1,\n                anchor_generator=dict(\n                    type='AnchorGenerator',\n                    scales=[8],\n                    ratios=[1.0],\n                    strides=[4, 8, 16, 32, 64]),\n                adapt_cfg=dict(type='dilation', dilation=3),\n                bridged_feature=True,\n                with_cls=False,\n                reg_decoded_bbox=True,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=(.0, .0, .0, .0),\n                    target_stds=(0.1, 0.1, 0.5, 0.5)),\n                loss_bbox=dict(\n                    type='IoULoss', linear=True,\n                    loss_weight=10.0 * rpn_weight)),\n            dict(\n                type='StageCascadeRPNHead',\n                in_channels=1,\n                feat_channels=1,\n                adapt_cfg=dict(type='offset'),\n                bridged_feature=False,\n                with_cls=True,\n                reg_decoded_bbox=True,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=(.0, .0, .0, .0),\n                    target_stds=(0.05, 0.05, 0.1, 0.1)),\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=True,\n                    loss_weight=1.0 * rpn_weight),\n                loss_bbox=dict(\n                    type='IoULoss', linear=True,\n                    loss_weight=10.0 * rpn_weight))\n        ],\n        train_cfg=[\n            dict(\n                assigner=dict(\n                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False),\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.7,\n                    min_pos_iou=0.3,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=256,\n                    pos_fraction=0.5,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=False),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False)\n        ],\n        test_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.8))))\n\n\nclass TestStageCascadeRPNHead(TestCase):\n\n    def test_cascade_rpn_head_loss(self):\n        \"\"\"Tests cascade rpn head loss when truth is empty and non-empty.\"\"\"\n        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)\n\n        s = 256\n        feats = [\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in cascade_rpn_head.stages[0].prior_generator.strides\n        ]\n        img_metas = {\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': 1,\n        }\n        sample = 
DetDataSample()\n        sample.set_metainfo(img_metas)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        sample.gt_instances = gt_instances\n\n        empty_gt_losses = cascade_rpn_head.loss(feats, [sample])\n        for key, loss in empty_gt_losses.items():\n            loss = sum(loss)\n            if 'cls' in key:\n                self.assertGreater(loss.item(), 0,\n                                   'cls loss should be non-zero')\n            elif 'reg' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no reg loss when no ground true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([0])\n        sample.gt_instances = gt_instances\n\n        one_gt_losses = cascade_rpn_head.loss(feats, [sample])\n        for loss in one_gt_losses.values():\n            loss = sum(loss)\n            self.assertGreater(\n                loss.item(), 0,\n                'cls loss, or box loss, or iou loss should be non-zero')\n\n    def test_cascade_rpn_head_loss_and_predict(self):\n        \"\"\"Tests cascade rpn head loss and predict function.\"\"\"\n        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)\n\n        s = 256\n        feats = [\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in cascade_rpn_head.stages[0].prior_generator.strides\n        ]\n        img_metas = {\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': 1,\n        }\n        sample = DetDataSample()\n        sample.set_metainfo(img_metas)\n\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        sample.gt_instances = gt_instances\n        proposal_cfg = ConfigDict(\n            dict(max_per_img=300, nms=dict(iou_threshold=0.8)))\n\n        cascade_rpn_head.loss_and_predict(feats, [sample], proposal_cfg)\n\n    def test_cascade_rpn_head_predict(self):\n        \"\"\"Tests cascade rpn head predict function.\"\"\"\n        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)\n\n        s = 256\n        feats = [\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in cascade_rpn_head.stages[0].prior_generator.strides\n        ]\n        img_metas = {\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': 1,\n        }\n        sample = DetDataSample()\n        sample.set_metainfo(img_metas)\n\n        cascade_rpn_head.predict(feats, [sample])\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_centernet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import CenterNetHead\n\n\nclass TestCenterNetHead(TestCase):\n\n    def test_center_head_loss(self):\n        \"\"\"Tests center head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{'batch_input_shape': (s, s, 3)}]\n        test_cfg = dict(topK=100, max_per_img=100)\n        centernet_head = CenterNetHead(\n            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)\n\n        feat = [torch.rand(1, 1, s, s)]\n        center_out, wh_out, offset_out = centernet_head.forward(feat)\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        empty_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,\n                                                      offset_out,\n                                                      [gt_instances],\n                                                      img_metas)\n        loss_center = empty_gt_losses['loss_center_heatmap']\n        loss_wh = empty_gt_losses['loss_wh']\n        loss_offset = empty_gt_losses['loss_offset']\n        assert loss_center.item() > 0, 'loss_center should be non-zero'\n        assert loss_wh.item() == 0, (\n            'there should be no loss_wh when there are no true boxes')\n        assert loss_offset.item() == 0, (\n            'there should be no loss_offset when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,\n                                                    offset_out, [gt_instances],\n                                                    img_metas)\n        loss_center = one_gt_losses['loss_center_heatmap']\n        loss_wh = one_gt_losses['loss_wh']\n        loss_offset = one_gt_losses['loss_offset']\n        assert loss_center.item() > 0, 'loss_center should be non-zero'\n        assert loss_wh.item() > 0, 'loss_wh should be non-zero'\n        assert loss_offset.item() > 0, 'loss_offset should be non-zero'\n\n    def test_centernet_head_get_targets(self):\n        \"\"\"Tests center head generating and decoding the heatmap.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'batch_input_shape': (s, s),\n        }]\n        test_cfg = ConfigDict(\n            dict(topk=100, local_maximum_kernel=3, max_per_img=100))\n        gt_bboxes = [\n            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],\n                          [10, 20, 100, 240]])\n        ]\n        gt_labels = [torch.LongTensor([1, 1, 2])]\n\n        centernet_head = CenterNetHead(\n            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)\n        self.feat_shape = (1, 1, s // 4, s // 4)\n        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,\n                                                self.feat_shape,\n                                              
  img_metas[0]['img_shape'])\n        center_target = targets['center_heatmap_target']\n        wh_target = targets['wh_target']\n        offset_target = targets['offset_target']\n        # make sure assign target right\n        for i in range(len(gt_bboxes[0])):\n            bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]\n            ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2\n            int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(\n                sum(bbox[1::2]) / 2)\n            w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]\n            x_off = ctx - int(ctx)\n            y_off = cty - int(cty)\n            assert center_target[0, label, int_cty, int_ctx] == 1\n            assert wh_target[0, 0, int_cty, int_ctx] == w\n            assert wh_target[0, 1, int_cty, int_ctx] == h\n            assert offset_target[0, 0, int_cty, int_ctx] == x_off\n            assert offset_target[0, 1, int_cty, int_ctx] == y_off\n\n    def test_centernet_head_get_results(self):\n        \"\"\"Tests center head generating and decoding the heatmap.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'batch_input_shape': (s, s),\n            'border': (0, 0, 0, 0),\n        }]\n        test_cfg = ConfigDict(\n            dict(\n                topk=100,\n                local_maximum_kernel=3,\n                max_per_img=100,\n                nms=dict(type='nms', iou_threshold=0.5)))\n        gt_bboxes = [\n            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],\n                          [10, 20, 100, 240]])\n        ]\n        gt_labels = [torch.LongTensor([1, 1, 2])]\n\n        centernet_head = CenterNetHead(\n            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)\n        self.feat_shape = (1, 1, s // 4, s // 4)\n        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,\n                                                self.feat_shape,\n                                                img_metas[0]['img_shape'])\n        center_target = targets['center_heatmap_target']\n        wh_target = targets['wh_target']\n        offset_target = targets['offset_target']\n        # make sure get_bboxes is right\n        detections = centernet_head.predict_by_feat([center_target],\n                                                    [wh_target],\n                                                    [offset_target],\n                                                    img_metas,\n                                                    rescale=True,\n                                                    with_nms=False)\n\n        pred_instances = detections[0]\n        out_bboxes = pred_instances.bboxes[:3]\n        out_clses = pred_instances.labels[:3]\n        for bbox, cls in zip(out_bboxes, out_clses):\n            flag = False\n            for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):\n                if (bbox[:4] == gt_bbox[:4]).all():\n                    flag = True\n            assert flag, 'get_bboxes is wrong'\n\n        detections = centernet_head.predict_by_feat([center_target],\n                                                    [wh_target],\n                                                    [offset_target],\n                                                    img_metas,\n                                                    rescale=True,\n                                                    with_nms=True)\n\n        pred_instances = detections[0]\n        out_bboxes = pred_instances.bboxes[:3]\n      
  out_clses = pred_instances.labels[:3]\n        for bbox, cls in zip(out_bboxes, out_clses):\n            flag = False\n            for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):\n                if (bbox[:4] == gt_bbox[:4]).all():\n                    flag = True\n            assert flag, 'get_bboxes is wrong'\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_centernet_update_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import CenterNetUpdateHead\n\n\nclass TestCenterNetUpdateHead(TestCase):\n\n    def test_centernet_update_head_loss(self):\n        \"\"\"Tests fcos head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        centernet_head = CenterNetUpdateHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            norm_cfg=None)\n\n        # Fcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in centernet_head.prior_generator.strides)\n        cls_scores, bbox_preds = centernet_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = centernet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                      [gt_instances],\n                                                      img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss and centerness loss should be zero\n        empty_cls_loss = empty_gt_losses['loss_cls'].item()\n        empty_box_loss = empty_gt_losses['loss_bbox'].item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss, 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = centernet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                    [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls'].item()\n        onegt_box_loss = one_gt_losses['loss_bbox'].item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_centripetal_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import CentripetalHead\n\n\nclass TestCentripetalHead(TestCase):\n\n    def test_centripetal_head_loss(self):\n        \"\"\"Tests corner head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }]\n\n        centripetal_head = CentripetalHead(\n            num_classes=4, in_channels=1, corner_emb_channels=0)\n\n        # Corner head expects a multiple levels of features per image\n        feat = [\n            torch.rand(1, 1, s // 4, s // 4)\n            for _ in range(centripetal_head.num_feat_levels)\n        ]\n        forward_outputs = centripetal_head.forward(feat)\n\n        # Test that empty ground truth encourages the network\n        # to predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n\n        empty_gt_losses = centripetal_head.loss_by_feat(\n            *forward_outputs, [gt_instances], img_metas, gt_bboxes_ignore)\n        empty_det_loss = sum(empty_gt_losses['det_loss'])\n        empty_guiding_loss = sum(empty_gt_losses['guiding_loss'])\n        empty_centripetal_loss = sum(empty_gt_losses['centripetal_loss'])\n        empty_off_loss = sum(empty_gt_losses['off_loss'])\n        self.assertTrue(empty_det_loss.item() > 0,\n                        'det loss should be non-zero')\n        self.assertTrue(\n            empty_guiding_loss.item() == 0,\n            'there should be no guiding loss when there are no true boxes')\n        self.assertTrue(\n            empty_centripetal_loss.item() == 0,\n            'there should be no centripetal loss when there are no true boxes')\n        self.assertTrue(\n            empty_off_loss.item() == 0,\n            'there should be no box loss when there are no true boxes')\n\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874],\n             [123.6667, 123.8757, 138.6326, 251.8874]])\n        gt_instances.labels = torch.LongTensor([2, 3])\n\n        two_gt_losses = centripetal_head.loss_by_feat(*forward_outputs,\n                                                      [gt_instances],\n                                                      img_metas,\n                                                      gt_bboxes_ignore)\n        twogt_det_loss = sum(two_gt_losses['det_loss'])\n        twogt_guiding_loss = sum(two_gt_losses['guiding_loss'])\n        twogt_centripetal_loss = sum(two_gt_losses['centripetal_loss'])\n        twogt_off_loss = sum(two_gt_losses['off_loss'])\n        assert twogt_det_loss.item() > 0, 'det loss should be non-zero'\n        assert twogt_guiding_loss.item() > 0, 'push loss should be non-zero'\n        assert twogt_centripetal_loss.item(\n        ) > 0, 'pull loss should be non-zero'\n        assert twogt_off_loss.item() > 0, 'off loss should be non-zero'\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_condinst_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import CondInstBboxHead, CondInstMaskHead\nfrom mmdet.structures.mask import BitmapMasks\n\n\ndef _rand_masks(num_items, bboxes, img_w, img_h):\n    rng = np.random.RandomState(0)\n    masks = np.zeros((num_items, img_h, img_w), dtype=np.float32)\n    for i, bbox in enumerate(bboxes):\n        bbox = bbox.astype(np.int32)\n        mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >\n                0.3).astype(np.int64)\n        masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask\n    return BitmapMasks(masks, height=img_h, width=img_w)\n\n\ndef _fake_mask_feature_head():\n    mask_feature_head = ConfigDict(\n        in_channels=1,\n        feat_channels=1,\n        start_level=0,\n        end_level=2,\n        out_channels=8,\n        mask_stride=8,\n        num_stacked_convs=4,\n        norm_cfg=dict(type='BN', requires_grad=True))\n    return mask_feature_head\n\n\nclass TestCondInstHead(TestCase):\n\n    def test_condinst_bboxhead_loss(self):\n        \"\"\"Tests condinst bboxhead loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        condinst_bboxhead = CondInstBboxHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            norm_cfg=None)\n\n        # Fcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in condinst_bboxhead.prior_generator.strides)\n        cls_scores, bbox_preds, centernesses, param_preds =\\\n            condinst_bboxhead.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n        empty_gt_losses = condinst_bboxhead.loss_by_feat(\n            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],\n            img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss and centerness loss should be zero\n        empty_cls_loss = empty_gt_losses['loss_cls'].item()\n        empty_box_loss = empty_gt_losses['loss_bbox'].item()\n        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss, 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_ctr_loss, 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n        
one_gt_losses = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n                                                       centernesses,\n                                                       param_preds,\n                                                       [gt_instances],\n                                                       img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls'].item()\n        onegt_box_loss = one_gt_losses['loss_bbox'].item()\n        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(onegt_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n\n        # Test the `center_sampling` works fine.\n        condinst_bboxhead.center_sampling = True\n        ctrsamp_losses = condinst_bboxhead.loss_by_feat(\n            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],\n            img_metas)\n        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()\n        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()\n        ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()\n        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(ctrsamp_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n\n        # Test the `norm_on_bbox` works fine.\n        condinst_bboxhead.norm_on_bbox = True\n        normbox_losses = condinst_bboxhead.loss_by_feat(\n            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],\n            img_metas)\n        normbox_cls_loss = normbox_losses['loss_cls'].item()\n        normbox_box_loss = normbox_losses['loss_bbox'].item()\n        normbox_ctr_loss = normbox_losses['loss_centerness'].item()\n        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(normbox_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n\n    def test_condinst_maskhead_loss(self):\n        \"\"\"Tests condinst maskhead loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        condinst_bboxhead = CondInstBboxHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            norm_cfg=None)\n\n        mask_feature_head = _fake_mask_feature_head()\n        condinst_maskhead = CondInstMaskHead(\n            mask_feature_head=mask_feature_head,\n            loss_mask=dict(\n                type='DiceLoss',\n                use_sigmoid=True,\n                activate=True,\n                eps=5e-6,\n                loss_weight=1.0))\n\n        # Fcos head expects a multiple levels of features per image\n        feats = []\n        for i in range(len(condinst_bboxhead.strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))\n        feats = tuple(feats)\n        cls_scores, bbox_preds, centernesses, param_preds =\\\n            condinst_bboxhead.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        
# predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n                                           centernesses, param_preds,\n                                           [gt_instances], img_metas)\n        # When truth is empty then all mask loss\n        # should be zero for random inputs\n        positive_infos = condinst_bboxhead.get_positive_infos()\n        mask_outs = condinst_maskhead.forward(feats, positive_infos)\n        empty_gt_mask_losses = condinst_maskhead.loss_by_feat(\n            *mask_outs, [gt_instances], img_metas, positive_infos)\n        loss_mask = empty_gt_mask_losses['loss_mask']\n        self.assertEqual(loss_mask, 0, 'mask loss should be zero')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n                                           centernesses, param_preds,\n                                           [gt_instances], img_metas)\n        positive_infos = condinst_bboxhead.get_positive_infos()\n        mask_outs = condinst_maskhead.forward(feats, positive_infos)\n        one_gt_mask_losses = condinst_maskhead.loss_by_feat(\n            *mask_outs, [gt_instances], img_metas, positive_infos)\n        loss_mask = one_gt_mask_losses['loss_mask']\n        self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_corner_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.evaluation import bbox_overlaps\nfrom mmdet.models.dense_heads import CornerHead\n\n\nclass TestCornerHead(TestCase):\n\n    def test_corner_head_loss(self):\n        \"\"\"Tests corner head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }]\n\n        corner_head = CornerHead(num_classes=4, in_channels=1)\n\n        # Corner head expects a multiple levels of features per image\n        feat = [\n            torch.rand(1, 1, s // 4, s // 4)\n            for _ in range(corner_head.num_feat_levels)\n        ]\n        forward_outputs = corner_head.forward(feat)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n\n        empty_gt_losses = corner_head.loss_by_feat(*forward_outputs,\n                                                   [gt_instances], img_metas,\n                                                   gt_bboxes_ignore)\n        empty_det_loss = sum(empty_gt_losses['det_loss'])\n        empty_push_loss = sum(empty_gt_losses['push_loss'])\n        empty_pull_loss = sum(empty_gt_losses['pull_loss'])\n        empty_off_loss = sum(empty_gt_losses['off_loss'])\n        self.assertTrue(empty_det_loss.item() > 0,\n                        'det loss should be non-zero')\n        self.assertTrue(\n            empty_push_loss.item() == 0,\n            'there should be no push loss when there are no true boxes')\n        self.assertTrue(\n            empty_pull_loss.item() == 0,\n            'there should be no pull loss when there are no true boxes')\n        self.assertTrue(\n            empty_off_loss.item() == 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = corner_head.loss_by_feat(*forward_outputs,\n                                                 [gt_instances], img_metas,\n                                                 gt_bboxes_ignore)\n        onegt_det_loss = sum(one_gt_losses['det_loss'])\n        onegt_push_loss = sum(one_gt_losses['push_loss'])\n        onegt_pull_loss = sum(one_gt_losses['pull_loss'])\n        onegt_off_loss = sum(one_gt_losses['off_loss'])\n        self.assertTrue(onegt_det_loss.item() > 0,\n                        'det loss should be non-zero')\n        self.assertTrue(\n            onegt_push_loss.item() == 0,\n            'there should be no push loss when there are only one true box')\n        self.assertTrue(onegt_pull_loss.item() > 0,\n                        'pull loss should be non-zero')\n        self.assertTrue(onegt_off_loss.item() > 0,\n                        'off loss should be non-zero')\n\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874],\n             [123.6667, 
123.8757, 138.6326, 251.8874]])\n        gt_instances.labels = torch.LongTensor([2, 3])\n\n        two_gt_losses = corner_head.loss_by_feat(*forward_outputs,\n                                                 [gt_instances], img_metas,\n                                                 gt_bboxes_ignore)\n        twogt_det_loss = sum(two_gt_losses['det_loss'])\n        twogt_push_loss = sum(two_gt_losses['push_loss'])\n        twogt_pull_loss = sum(two_gt_losses['pull_loss'])\n        twogt_off_loss = sum(two_gt_losses['off_loss'])\n        self.assertTrue(twogt_det_loss.item() > 0,\n                        'det loss should be non-zero')\n        # F.relu limits push loss larger than or equal to 0.\n        self.assertTrue(twogt_push_loss.item() >= 0,\n                        'push loss should be non-zero')\n        self.assertTrue(twogt_pull_loss.item() > 0,\n                        'pull loss should be non-zero')\n        self.assertTrue(twogt_off_loss.item() > 0,\n                        'off loss should be non-zero')\n\n    def test_corner_head_encode_and_decode_heatmap(self):\n        \"\"\"Tests corner head generating and decoding the heatmap.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3),\n            'border': (0, 0, 0, 0)\n        }]\n\n        gt_bboxes = [\n            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],\n                          [10, 20, 200, 240]])\n        ]\n        gt_labels = [torch.LongTensor([1, 1, 2])]\n\n        corner_head = CornerHead(\n            num_classes=4, in_channels=1, corner_emb_channels=1)\n\n        feat = [\n            torch.rand(1, 1, s // 4, s // 4)\n            for _ in range(corner_head.num_feat_levels)\n        ]\n\n        targets = corner_head.get_targets(\n            gt_bboxes,\n            gt_labels,\n            feat[0].shape,\n            img_metas[0]['batch_input_shape'],\n            with_corner_emb=corner_head.with_corner_emb)\n\n        gt_tl_heatmap = targets['topleft_heatmap']\n        gt_br_heatmap = targets['bottomright_heatmap']\n        gt_tl_offset = targets['topleft_offset']\n        gt_br_offset = targets['bottomright_offset']\n        embedding = targets['corner_embedding']\n        [top, left], [bottom, right] = embedding[0][0]\n        gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])\n        gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])\n        gt_tl_embedding_heatmap[0, 0, top, left] = 1\n        gt_br_embedding_heatmap[0, 0, bottom, right] = 1\n\n        batch_bboxes, batch_scores, batch_clses = corner_head._decode_heatmap(\n            tl_heat=gt_tl_heatmap,\n            br_heat=gt_br_heatmap,\n            tl_off=gt_tl_offset,\n            br_off=gt_br_offset,\n            tl_emb=gt_tl_embedding_heatmap,\n            br_emb=gt_br_embedding_heatmap,\n            img_meta=img_metas[0],\n            k=100,\n            kernel=3,\n            distance_threshold=0.5)\n\n        bboxes = batch_bboxes.view(-1, 4)\n        scores = batch_scores.view(-1, 1)\n        clses = batch_clses.view(-1, 1)\n\n        idx = scores.argsort(dim=0, descending=True)\n        bboxes = bboxes[idx].view(-1, 4)\n        scores = scores[idx].view(-1)\n        clses = clses[idx].view(-1)\n\n        valid_bboxes = bboxes[torch.where(scores > 0.05)]\n        valid_labels = clses[torch.where(scores > 0.05)]\n        max_coordinate = valid_bboxes.max()\n        offsets = 
valid_labels.to(valid_bboxes) * (max_coordinate + 1)\n        gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)\n\n        offset_bboxes = valid_bboxes + offsets[:, None]\n        offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]\n\n        iou_matrix = bbox_overlaps(offset_bboxes.numpy(),\n                                   offset_gtbboxes.numpy())\n        self.assertEqual((iou_matrix == 1).sum(), 3)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_ddod_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import DDODHead\n\n\nclass TestDDODHead(TestCase):\n\n    def test_ddod_head_loss(self):\n        \"\"\"Tests ddod head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),\n                reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        atss_head = DDODHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            feat_channels=1,\n            use_dcn=False,\n            norm_cfg=None,\n            train_cfg=cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n            loss_iou=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [8, 16, 32, 64, 128]\n        ]\n        cls_scores, bbox_preds, centernesses = atss_head.forward(feat)\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 centernesses, [gt_instances],\n                                                 img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        empty_centerness_loss = sum(empty_gt_losses['loss_iou'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_centerness_loss.item(), 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,\n                                               centernesses, [gt_instances],\n                                               
img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        onegt_centerness_loss = sum(one_gt_losses['loss_iou'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_centerness_loss.item(), 0,\n                           'centerness loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_embedding_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport pytest\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import EmbeddingRPNHead\nfrom mmdet.structures import DetDataSample\n\n\nclass TestEmbeddingRPNHead(TestCase):\n\n    def test_init(self):\n        \"\"\"Test init rpn head.\"\"\"\n        rpn_head = EmbeddingRPNHead(\n            num_proposals=100, proposal_feature_channel=256)\n        rpn_head.init_weights()\n        self.assertTrue(rpn_head.init_proposal_bboxes)\n        self.assertTrue(rpn_head.init_proposal_features)\n\n    def test_loss_and_predict(self):\n        s = 256\n        img_meta = {\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }\n        rpn_head = EmbeddingRPNHead(\n            num_proposals=100, proposal_feature_channel=256)\n\n        feats = [\n            torch.rand(2, 1, s // (2**(i + 2)), s // (2**(i + 2)))\n            for i in range(5)\n        ]\n\n        data_sample = DetDataSample()\n        data_sample.set_metainfo(img_meta)\n\n        # test predict\n        result_list = rpn_head.predict(feats, [data_sample])\n        self.assertTrue(isinstance(result_list, list))\n        self.assertTrue(isinstance(result_list[0], InstanceData))\n\n        # test loss_and_predict\n        result_list = rpn_head.loss_and_predict(feats, [data_sample])\n        self.assertTrue(isinstance(result_list, tuple))\n        self.assertTrue(isinstance(result_list[0], dict))\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertTrue(isinstance(result_list[1], list))\n        self.assertTrue(isinstance(result_list[1][0], InstanceData))\n\n        # test loss\n        with pytest.raises(NotImplementedError):\n            rpn_head.loss(feats, [data_sample])\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_fcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import FCOSHead\n\n\nclass TestFCOSHead(TestCase):\n\n    def test_fcos_head_loss(self):\n        \"\"\"Tests fcos head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        fcos_head = FCOSHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            norm_cfg=None)\n\n        # Fcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in fcos_head.prior_generator.strides)\n        cls_scores, bbox_preds, centernesses = fcos_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 centernesses, [gt_instances],\n                                                 img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss and centerness loss should be zero\n        empty_cls_loss = empty_gt_losses['loss_cls'].item()\n        empty_box_loss = empty_gt_losses['loss_bbox'].item()\n        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss, 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_ctr_loss, 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                               centernesses, [gt_instances],\n                                               img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls'].item()\n        onegt_box_loss = one_gt_losses['loss_bbox'].item()\n        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(onegt_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n\n        # Test the `center_sampling` works fine.\n        fcos_head.center_sampling = True\n        ctrsamp_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                                centernesses, [gt_instances],\n                                                img_metas)\n        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()\n        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()\n        ctrsamp_ctr_loss = 
ctrsamp_losses['loss_centerness'].item()\n        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(ctrsamp_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n\n        # Test the `norm_on_bbox` works fine.\n        fcos_head.norm_on_bbox = True\n        normbox_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                                centernesses, [gt_instances],\n                                                img_metas)\n        normbox_cls_loss = normbox_losses['loss_cls'].item()\n        normbox_box_loss = normbox_losses['loss_bbox'].item()\n        normbox_ctr_loss = normbox_losses['loss_centerness'].item()\n        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(normbox_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_fovea_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import FoveaHead\n\n\nclass TestFOVEAHead(TestCase):\n\n    def test_fovea_head_loss(self):\n        \"\"\"Tests anchor head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        fovea_head = FoveaHead(num_classes=4, in_channels=1)\n\n        # Anchor head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))\n            for i in range(len(fovea_head.prior_generator.strides)))\n        cls_scores, bbox_preds = fovea_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,\n                                                  [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = fovea_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_free_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import FreeAnchorRetinaHead\n\n\nclass TestFreeAnchorRetinaHead(TestCase):\n\n    def test_free_anchor_head_loss(self):\n        \"\"\"Tests rpn head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        anchor_head = FreeAnchorRetinaHead(num_classes=1, in_channels=1)\n\n        # Anchor head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))\n            for i in range(len(anchor_head.prior_generator.strides)))\n        cls_scores, bbox_preds = anchor_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                   [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        positive_bag_loss = empty_gt_losses['positive_bag_loss']\n        negative_bag_loss = empty_gt_losses['negative_bag_loss']\n        self.assertGreater(negative_bag_loss.item(), 0,\n                           'negative_bag loss should be non-zero')\n        self.assertEqual(\n            positive_bag_loss.item(), 0,\n            'there should be no positive_bag loss when there are no true boxes'\n        )\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([0])\n\n        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['positive_bag_loss']\n        onegt_box_loss = one_gt_losses['negative_bag_loss']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'positive bag loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'negative bag loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_fsaf_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import ceil\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import FSAFHead\n\n\nclass TestFSAFHead(TestCase):\n\n    def test_fsaf_head_loss(self):\n        \"\"\"Tests fsaf head loss when truth is empty and non-empty.\"\"\"\n        s = 300\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': 1,\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='CenterRegionAssigner',\n                    pos_scale=0.2,\n                    neg_scale=0.2,\n                    min_pos_iof=0.01),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        fsaf_head = FSAFHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            feat_channels=1,\n            reg_decoded_bbox=True,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                octave_base_scale=1,\n                scales_per_octave=1,\n                ratios=[1.0],\n                strides=[8, 16, 32, 64, 128]),\n            bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0,\n                reduction='none'),\n            loss_bbox=dict(\n                type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'),\n            train_cfg=cfg)\n\n        # FSAF head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n            for stride in fsaf_head.prior_generator.strides)\n        cls_scores, bbox_preds = fsaf_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss should be zero\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,\n                                               [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        
self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_ga_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.models.dense_heads import GARetinaHead\n\nga_retina_head_config = ConfigDict(\n    dict(\n        num_classes=4,\n        in_channels=4,\n        feat_channels=4,\n        stacked_convs=1,\n        approx_anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        square_anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[4],\n            strides=[8, 16, 32, 64, 128]),\n        anchor_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loc_filter_thr=0.01,\n        loss_loc=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),\n        train_cfg=dict(\n            ga_assigner=dict(\n                type='ApproxMaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.4,\n                min_pos_iou=0.4,\n                ignore_iof_thr=-1),\n            ga_sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.0,\n                ignore_iof_thr=-1),\n            allowed_border=-1,\n            pos_weight=-1,\n            center_ratio=0.2,\n            ignore_ratio=0.5,\n            debug=False),\n        test_cfg=dict(\n            nms_pre=1000,\n            min_bbox_size=0,\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n\n\nclass TestGARetinaHead(TestCase):\n\n    def test_ga_retina_head_init_and_forward(self):\n        \"\"\"The GARetinaHead inherit loss and prediction function from\n        GuidedAchorHead.\n\n        Here, we only test GARetinaHet initialization and forward.\n        \"\"\"\n        # Test initializaion\n        ga_retina_head = GARetinaHead(**ga_retina_head_config)\n\n        # Test forward\n        s = 256\n        feats = (\n            torch.rand(1, 4, s // stride[1], s // stride[0])\n            for stride in ga_retina_head.square_anchor_generator.strides)\n        ga_retina_head(feats)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_ga_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import GARPNHead\n\nga_rpn_config = ConfigDict(\n    dict(\n        num_classes=1,\n        in_channels=4,\n        feat_channels=4,\n        approx_anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=8,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[4, 8, 16, 32, 64]),\n        square_anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[8],\n            strides=[4, 8, 16, 32, 64]),\n        anchor_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.07, 0.07, 0.14, 0.14]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.07, 0.07, 0.11, 0.11]),\n        loc_filter_thr=0.01,\n        loss_loc=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n        train_cfg=dict(\n            ga_assigner=dict(\n                type='ApproxMaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                ignore_iof_thr=-1),\n            ga_sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.7,\n                neg_iou_thr=0.3,\n                min_pos_iou=0.3,\n                match_low_quality=True,\n                ignore_iof_thr=-1),\n            sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            allowed_border=-1,\n            center_ratio=0.2,\n            ignore_ratio=0.5,\n            pos_weight=-1,\n            debug=False),\n        test_cfg=dict(\n            nms_pre=1000,\n            ms_post=1000,\n            max_per_img=300,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0)))\n\n\nclass TestGARPNHead(TestCase):\n\n    def test_ga_rpn_head_loss(self):\n        \"\"\"Tests ga rpn head loss.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': (1, 1)\n        }]\n        ga_rpn_head = GARPNHead(**ga_rpn_config)\n\n        feats = (\n            torch.rand(1, 4, s // stride[1], s // stride[0])\n            for stride in ga_rpn_head.square_anchor_generator.strides)\n        outs = ga_rpn_head(feats)\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n  
      gt_instances.labels = torch.LongTensor([0])\n\n        one_gt_losses = ga_rpn_head.loss_by_feat(*outs, [gt_instances],\n                                                 img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls']).item()\n        onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox']).item()\n        onegt_shape_loss = sum(one_gt_losses['loss_anchor_shape']).item()\n        onegt_loc_loss = sum(one_gt_losses['loss_anchor_loc']).item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(onegt_shape_loss, 0,\n                           'shape loss should be non-zero')\n        self.assertGreater(onegt_loc_loss, 0,\n                           'location loss should be non-zero')\n\n    def test_ga_rpn_head_predict_by_feat(self):\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': (1, 1)\n        }]\n        ga_rpn_head = GARPNHead(**ga_rpn_config)\n\n        feats = (\n            torch.rand(1, 4, s // stride[1], s // stride[0])\n            for stride in ga_rpn_head.square_anchor_generator.strides)\n        outs = ga_rpn_head(feats)\n\n        cfg = ConfigDict(\n            dict(\n                nms_pre=2000,\n                nms_post=1000,\n                max_per_img=300,\n                nms=dict(type='nms', iou_threshold=0.7),\n                min_bbox_size=0))\n        ga_rpn_head.predict_by_feat(\n            *outs, batch_img_metas=img_metas, cfg=cfg, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_gfl_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import GFLHead\n\n\nclass TestGFLHead(TestCase):\n\n    def test_gfl_head_loss(self):\n        \"\"\"Tests gfl head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        gfl_head = GFLHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            train_cfg=train_cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]),\n            loss_cls=dict(\n                type='QualityFocalLoss',\n                use_sigmoid=True,\n                beta=2.0,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        cls_scores, bbox_preds = gfl_head.forward(feat)\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_dfl_loss.item(), 0,\n            'there should be no dfl loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,\n                                              [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_dfl_loss.item(), 
0,\n                           'dfl loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_guided_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import GuidedAnchorHead\n\nguided_anchor_head_config = ConfigDict(\n    dict(\n        num_classes=4,\n        in_channels=4,\n        feat_channels=4,\n        approx_anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        square_anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[4],\n            strides=[8, 16, 32, 64, 128]),\n        anchor_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]),\n        loc_filter_thr=0.01,\n        loss_loc=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),\n        loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),\n        train_cfg=dict(\n            ga_assigner=dict(\n                type='ApproxMaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.4,\n                min_pos_iou=0.4,\n                ignore_iof_thr=-1),\n            ga_sampler=dict(\n                type='RandomSampler',\n                num=256,\n                pos_fraction=0.5,\n                neg_pos_ub=-1,\n                add_gt_as_proposals=False),\n            assigner=dict(\n                type='MaxIoUAssigner',\n                pos_iou_thr=0.5,\n                neg_iou_thr=0.5,\n                min_pos_iou=0.0,\n                ignore_iof_thr=-1),\n            allowed_border=-1,\n            pos_weight=-1,\n            center_ratio=0.2,\n            ignore_ratio=0.5,\n            debug=False),\n        test_cfg=dict(\n            nms_pre=1000,\n            min_bbox_size=0,\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)))\n\n\nclass TestGuidedAnchorHead(TestCase):\n\n    def test_guided_anchor_head_loss(self):\n        \"\"\"Tests guided anchor loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': (1, 1)\n        }]\n        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)\n\n        feats = (\n            torch.rand(1, 4, s // stride[1], s // stride[0])\n            for stride in guided_anchor_head.square_anchor_generator.strides)\n        outs = guided_anchor_head(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = guided_anchor_head.loss_by_feat(\n            *outs, [gt_instances], 
img_metas)\n        # When there is no truth, the cls and location loss should be nonzero\n        # but box and shape loss should be zero\n        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()\n        empty_box_loss = sum(empty_gt_losses['loss_bbox']).item()\n        empty_shape_loss = sum(empty_gt_losses['loss_shape']).item()\n        empty_loc_loss = sum(empty_gt_losses['loss_loc']).item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(empty_loc_loss, 0,\n                           'location loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss, 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_shape_loss, 0,\n            'there should be no shape loss when there are no true boxes')\n\n        # When truth is non-empty then cls, box, shape and location loss\n        # should all be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = guided_anchor_head.loss_by_feat(\n            *outs, [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()\n        onegt_box_loss = sum(one_gt_losses['loss_bbox']).item()\n        onegt_shape_loss = sum(one_gt_losses['loss_shape']).item()\n        onegt_loc_loss = sum(one_gt_losses['loss_loc']).item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(onegt_shape_loss, 0,\n                           'shape loss should be non-zero')\n        self.assertGreater(onegt_loc_loss, 0,\n                           'location loss should be non-zero')\n\n    def test_guided_anchor_head_predict_by_feat(self):\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': (1, 1)\n        }]\n        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)\n\n        feats = (\n            torch.rand(1, 4, s // stride[1], s // stride[0])\n            for stride in guided_anchor_head.square_anchor_generator.strides)\n        outs = guided_anchor_head(feats)\n\n        guided_anchor_head.predict_by_feat(\n            *outs, batch_img_metas=img_metas, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_lad_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import LADHead, lad_head\nfrom mmdet.models.dense_heads.lad_head import levels_to_images\n\n\nclass TestLADHead(TestCase):\n\n    def test_lad_head_loss(self):\n        \"\"\"Tests lad head loss when truth is empty and non-empty.\"\"\"\n\n        class mock_skm:\n\n            def GaussianMixture(self, *args, **kwargs):\n                return self\n\n            def fit(self, loss):\n                pass\n\n            def predict(self, loss):\n                components = np.zeros_like(loss, dtype=np.long)\n                return components.reshape(-1)\n\n            def score_samples(self, loss):\n                scores = np.random.random(len(loss))\n                return scores\n\n        lad_head.skm = mock_skm()\n\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.1,\n                    neg_iou_thr=0.1,\n                    min_pos_iou=0,\n                    ignore_iof_thr=-1),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        # since Focal Loss is not supported on CPU\n        # since Focal Loss is not supported on CPU\n        lad = LADHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),\n            loss_centerness=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))\n        teacher_model = LADHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),\n            loss_centerness=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        lad.init_weights()\n        teacher_model.init_weights()\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        batch_gt_instances_ignore = None\n\n        outs_teacher = teacher_model(feat)\n        label_assignment_results = teacher_model.get_label_assignment(\n            *outs_teacher, [gt_instances], img_metas,\n            batch_gt_instances_ignore)\n\n        outs = teacher_model(feat)\n        empty_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,\n                                           batch_gt_instances_ignore,\n                                           label_assignment_results)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = 
empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        empty_iou_loss = empty_gt_losses['loss_iou']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_iou_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        batch_gt_instances_ignore = None\n\n        label_assignment_results = teacher_model.get_label_assignment(\n            *outs_teacher, [gt_instances], img_metas,\n            batch_gt_instances_ignore)\n        one_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,\n                                         batch_gt_instances_ignore,\n                                         label_assignment_results)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        onegt_iou_loss = one_gt_losses['loss_iou']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_iou_loss.item(), 0,\n                           'box loss should be non-zero')\n        n, c, h, w = 10, 4, 20, 20\n        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]\n        results = levels_to_images(mlvl_tensor)\n        self.assertEqual(len(results), n)\n        self.assertEqual(results[0].size(), (h * w * 5, c))\n        self.assertTrue(lad.with_score_voting)\n\n        lad = LADHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8]),\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),\n            loss_centerness=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))\n        cls_scores = [torch.ones(2, 4, 5, 5)]\n        bbox_preds = [torch.ones(2, 4, 5, 5)]\n        iou_preds = [torch.ones(2, 1, 5, 5)]\n        cfg = Config(\n            dict(\n                nms_pre=1000,\n                min_bbox_size=0,\n                score_thr=0.05,\n                nms=dict(type='nms', iou_threshold=0.6),\n                max_per_img=100))\n        rescale = False\n        lad.predict_by_feat(\n            cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_ld_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import GFLHead, LDHead\n\n\nclass TestLDHead(TestCase):\n\n    def test_ld_head_loss(self):\n        \"\"\"Tests ld head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n\n        ld_head = LDHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            loss_ld=dict(\n                type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),\n            loss_cls=dict(\n                type='QualityFocalLoss',\n                use_sigmoid=True,\n                beta=2.0,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]))\n\n        teacher_model = GFLHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            loss_cls=dict(\n                type='QualityFocalLoss',\n                use_sigmoid=True,\n                beta=2.0,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]))\n\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        cls_scores, bbox_preds = ld_head.forward(feat)\n        rand_soft_target = teacher_model.forward(feat)[1]\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        batch_gt_instances_ignore = None\n\n        empty_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,\n                                               [gt_instances], img_metas,\n                                               rand_soft_target,\n                                               batch_gt_instances_ignore)\n\n        # When there is no truth, the cls loss should be nonzero, ld loss\n        # should be non-negative but there should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        empty_ld_loss = sum(empty_gt_losses['loss_ld'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertGreaterEqual(empty_ld_loss.item(), 0,\n                      
          'ld loss should be non-negative')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        batch_gt_instances_ignore = None\n\n        one_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,\n                                             [gt_instances], img_metas,\n                                             rand_soft_target,\n                                             batch_gt_instances_ignore)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n\n        batch_gt_instances_ignore = gt_instances\n\n        # When truth is non-empty but ignored then the cls loss should be\n        # nonzero, but there should be no box loss.\n        ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas,\n                                                rand_soft_target,\n                                                batch_gt_instances_ignore)\n        ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])\n        ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])\n\n        self.assertGreater(ignore_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(ignore_box_loss.item(), 0,\n                         'gt bbox ignored loss should be zero')\n\n        # When truth is non-empty and not ignored then both cls and box loss\n        # should be nonzero for random inputs\n        batch_gt_instances_ignore = InstanceData()\n        batch_gt_instances_ignore.bboxes = torch.randn(1, 4)\n\n        not_ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,\n                                                    [gt_instances], img_metas,\n                                                    rand_soft_target,\n                                                    batch_gt_instances_ignore)\n        not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])\n        not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])\n\n        self.assertGreater(not_ignore_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreaterEqual(not_ignore_box_loss.item(), 0,\n                                'gt bbox not ignored loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_nasfcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import NASFCOSHead\n\n\nclass TestNASFCOSHead(TestCase):\n\n    def test_nasfcos_head_loss(self):\n        \"\"\"Tests nasfcos head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        nasfcos_head = NASFCOSHead(\n            num_classes=4,\n            in_channels=2,  # the same as `deform_groups` in dconv3x3_config\n            feat_channels=2,\n            norm_cfg=None)\n\n        # Nasfcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 2, s // stride[1], s // stride[0]).float()\n            for stride in nasfcos_head.prior_generator.strides)\n        cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                                    centernesses,\n                                                    [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss and centerness loss should be zero\n        empty_cls_loss = empty_gt_losses['loss_cls'].item()\n        empty_box_loss = empty_gt_losses['loss_bbox'].item()\n        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss, 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_ctr_loss, 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,\n                                                  centernesses, [gt_instances],\n                                                  img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls'].item()\n        onegt_box_loss = one_gt_losses['loss_bbox'].item()\n        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')\n        self.assertGreater(onegt_ctr_loss, 0,\n                           'centerness loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_paa_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import PAAHead, paa_head\nfrom mmdet.models.utils import levels_to_images\n\n\nclass TestPAAHead(TestCase):\n\n    def test_paa_head_loss(self):\n        \"\"\"Tests paa head loss when truth is empty and non-empty.\"\"\"\n\n        class mock_skm:\n\n            def GaussianMixture(self, *args, **kwargs):\n                return self\n\n            def fit(self, loss):\n                pass\n\n            def predict(self, loss):\n                components = np.zeros_like(loss, dtype=np.long)\n                return components.reshape(-1)\n\n            def score_samples(self, loss):\n                scores = np.random.random(len(loss))\n                return scores\n\n        paa_head.skm = mock_skm()\n\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.1,\n                    neg_iou_thr=0.1,\n                    min_pos_iou=0,\n                    ignore_iof_thr=-1),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        # since Focal Loss is not supported on CPU\n        paa = PAAHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8, 16, 32, 64, 128]),\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),\n            loss_centerness=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        paa.init_weights()\n        cls_scores, bbox_preds, iou_preds = paa(feat)\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        empty_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,\n                                           [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        empty_iou_loss = empty_gt_losses['loss_iou']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_iou_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be 
nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,\n                                         [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        onegt_iou_loss = one_gt_losses['loss_iou']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_iou_loss.item(), 0,\n                           'box loss should be non-zero')\n        n, c, h, w = 10, 4, 20, 20\n        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]\n        results = levels_to_images(mlvl_tensor)\n        self.assertEqual(len(results), n)\n        self.assertEqual(results[0].size(), (h * w * 5, c))\n        self.assertTrue(paa.with_score_voting)\n\n        paa = PAAHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                octave_base_scale=8,\n                scales_per_octave=1,\n                strides=[8]),\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),\n            loss_centerness=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))\n        cls_scores = [torch.ones(2, 4, 5, 5)]\n        bbox_preds = [torch.ones(2, 4, 5, 5)]\n        iou_preds = [torch.ones(2, 1, 5, 5)]\n        cfg = Config(\n            dict(\n                nms_pre=1000,\n                min_bbox_size=0,\n                score_thr=0.05,\n                nms=dict(type='nms', iou_threshold=0.6),\n                max_per_img=100))\n        rescale = False\n        paa.predict_by_feat(\n            cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_pisa_retinanet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import ceil\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import PISARetinaHead\n\n\nclass TestPISARetinaHead(TestCase):\n\n    def test_pisa_reitnanet_head_loss(self):\n        \"\"\"Tests pisa retinanet head loss when truth is empty and non-empty.\"\"\"\n        s = 300\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': 1,\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.4,\n                    min_pos_iou=0,\n                    ignore_iof_thr=-1),\n                isr=dict(k=2., bias=0.),\n                carl=dict(k=1., bias=0.2),\n                sampler=dict(type='PseudoSampler'),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        pisa_retinanet_head = PISARetinaHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            feat_channels=256,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                octave_base_scale=4,\n                scales_per_octave=3,\n                ratios=[0.5, 1.0, 2.0],\n                strides=[8, 16, 32, 64, 128]),\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[.0, .0, .0, .0],\n                target_stds=[1.0, 1.0, 1.0, 1.0]),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),\n            train_cfg=cfg)\n\n        # pisa retina head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n            for stride in pisa_retinanet_head.prior_generator.strides)\n        cls_scores, bbox_preds = pisa_retinanet_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = pisa_retinanet_head.loss_by_feat(\n            cls_scores, bbox_preds, [gt_instances], img_metas)\n        # When there is no truth, cls_loss and box_loss should all be zero.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        empty_carl_loss = empty_gt_losses['loss_carl']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_carl_loss.item(), 0,\n            'there should be no carl loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 
238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = pisa_retinanet_head.loss_by_feat(\n            cls_scores, bbox_preds, [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        onegt_carl_loss = one_gt_losses['loss_carl']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_carl_loss.item(), 0,\n                           'carl loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_pisa_ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import ceil\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import PISASSDHead\n\n\nclass TestPISASSDHead(TestCase):\n\n    def test_pisa_ssd_head_loss(self):\n        \"\"\"Tests pisa ssd head loss when truth is empty and non-empty.\"\"\"\n        s = 300\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.5,\n                    min_pos_iou=0.,\n                    ignore_iof_thr=-1,\n                    gt_max_assign_all=False),\n                sampler=dict(type='PseudoSampler'),\n                smoothl1_beta=1.,\n                allowed_border=-1,\n                pos_weight=-1,\n                neg_pos_ratio=3,\n                debug=False))\n        pisa_ssd_head = PISASSDHead(\n            num_classes=4,\n            in_channels=(1, 1, 1, 1, 1, 1),\n            anchor_generator=dict(\n                type='SSDAnchorGenerator',\n                scale_major=False,\n                input_size=s,\n                basesize_ratio_range=(0.15, 0.9),\n                strides=[8, 16, 32, 64, 100, 300],\n                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),\n            train_cfg=cfg)\n\n        # PISA SSD head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n            for stride in pisa_ssd_head.prior_generator.strides)\n        cls_scores, bbox_preds = pisa_ssd_head.forward(feats)\n\n        # test without isr and carl\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                                     [gt_instances], img_metas)\n        # When there is no truth, cls_loss and box_loss should all be zero.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        self.assertEqual(\n            empty_cls_loss.item(), 0,\n            'there should be no cls loss when there are no true boxes')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                                   [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n 
                          'box loss should be non-zero')\n\n        pisa_ssd_head.train_cfg.update(\n            dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))\n\n        # test with isr and carl\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                                     [gt_instances], img_metas)\n        # When there is no truth, cls_loss and box_loss should all be zero.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        self.assertEqual(\n            empty_cls_loss.item(), 0,\n            'there should be no cls loss when there are no true boxes')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                                   [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_reppoints_head.py",
    "content": "import unittest\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom parameterized import parameterized\n\nfrom mmdet.models.dense_heads import RepPointsHead\nfrom mmdet.structures import DetDataSample\n\n\nclass TestRepPointsHead(unittest.TestCase):\n\n    @parameterized.expand(['moment', 'minmax', 'partial_minmax'])\n    def test_head_loss(self, transform_method='moment'):\n        cfg = ConfigDict(\n            dict(\n                num_classes=2,\n                in_channels=32,\n                point_feat_channels=10,\n                num_points=9,\n                gradient_mul=0.1,\n                point_strides=[8, 16, 32, 64, 128],\n                point_base_scale=4,\n                loss_cls=dict(\n                    type='FocalLoss',\n                    use_sigmoid=True,\n                    gamma=2.0,\n                    alpha=0.25,\n                    loss_weight=1.0),\n                loss_bbox_init=dict(\n                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),\n                loss_bbox_refine=dict(\n                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n                use_grid_points=False,\n                center_init=True,\n                transform_method=transform_method,\n                moment_mul=0.01,\n                init_cfg=dict(\n                    type='Normal',\n                    layer='Conv2d',\n                    std=0.01,\n                    override=dict(\n                        type='Normal',\n                        name='reppoints_cls_out',\n                        std=0.01,\n                        bias_prob=0.01)),\n                train_cfg=dict(\n                    init=dict(\n                        assigner=dict(\n                            type='PointAssigner', scale=4, pos_num=1),\n                        allowed_border=-1,\n                        pos_weight=-1,\n                        debug=False),\n                    refine=dict(\n                        assigner=dict(\n                            type='MaxIoUAssigner',\n                            pos_iou_thr=0.5,\n                            neg_iou_thr=0.4,\n                            min_pos_iou=0,\n                            ignore_iof_thr=-1),\n                        allowed_border=-1,\n                        pos_weight=-1,\n                        debug=False)),\n                test_cfg=dict(\n                    nms_pre=1000,\n                    min_bbox_size=0,\n                    score_thr=0.05,\n                    nms=dict(type='nms', iou_threshold=0.5),\n                    max_per_img=100)))\n        reppoints_head = RepPointsHead(**cfg)\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }]\n        x = [\n            torch.rand(1, 32, s // 2**(i + 2), s // 2**(i + 2))\n            for i in range(5)\n        ]\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n\n        reppoints_head.train()\n        forward_outputs = reppoints_head.forward(x)\n        empty_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,\n                                                      
[gt_instances],\n                                                      img_metas,\n                                                      gt_bboxes_ignore)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no pts loss.\n        for key, losses in empty_gt_losses.items():\n            for loss in losses:\n                if 'cls' in key:\n                    self.assertGreater(loss.item(), 0,\n                                       'cls loss should be non-zero')\n                elif 'pts' in key:\n                    self.assertEqual(\n                        loss.item(), 0,\n                        'there should be no reg loss when there are no '\n                        'true boxes'\n                    )\n\n        # When truth is non-empty then both cls and pts loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,\n                                                    [gt_instances], img_metas,\n                                                    gt_bboxes_ignore)\n        # loss_cls should all be non-zero\n        self.assertTrue(\n            all([loss.item() > 0 for loss in one_gt_losses['loss_cls']]))\n        # only one level loss_pts_init is non-zero\n        cnt_non_zero = 0\n        for loss in one_gt_losses['loss_pts_init']:\n            if loss.item() != 0:\n                cnt_non_zero += 1\n        self.assertEqual(cnt_non_zero, 1)\n\n        # only one level loss_pts_refine is non-zero\n        cnt_non_zero = 0\n        for loss in one_gt_losses['loss_pts_refine']:\n            if loss.item() != 0:\n                cnt_non_zero += 1\n        self.assertEqual(cnt_non_zero, 1)\n\n        # test loss\n        samples = DetDataSample()\n        samples.set_metainfo(img_metas[0])\n        samples.gt_instances = gt_instances\n        reppoints_head.loss(x, [samples])\n        # test only predict\n        reppoints_head.eval()\n        reppoints_head.predict(x, [samples], rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_retina_sepBN_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import RetinaSepBNHead\n\n\nclass TestRetinaSepBNHead(TestCase):\n\n    def test_init(self):\n        \"\"\"Test init RetinaSepBN head.\"\"\"\n        anchor_head = RetinaSepBNHead(num_classes=1, num_ins=1, in_channels=1)\n        anchor_head.init_weights()\n        self.assertTrue(anchor_head.cls_convs)\n        self.assertTrue(anchor_head.reg_convs)\n        self.assertTrue(anchor_head.retina_cls)\n        self.assertTrue(anchor_head.retina_reg)\n\n    def test_retina_sepbn_head_loss(self):\n        \"\"\"Tests RetinaSepBN head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.4,\n                    min_pos_iou=0,\n                    ignore_iof_thr=-1),\n                sampler=dict(type='PseudoSampler'\n                             ),  # Focal loss should use PseudoSampler\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        anchor_head = RetinaSepBNHead(\n            num_classes=4, num_ins=5, in_channels=1, train_cfg=cfg)\n\n        # Anchor head expects a multiple levels of features per image\n        feats = []\n        for i in range(len(anchor_head.prior_generator.strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))\n\n        cls_scores, bbox_preds = anchor_head.forward(tuple(feats))\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                   [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n               
            'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport pytest\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import RPNHead\n\n\nclass TestRPNHead(TestCase):\n\n    def test_init(self):\n        \"\"\"Test init rpn head.\"\"\"\n        rpn_head = RPNHead(num_classes=1, in_channels=1)\n        self.assertTrue(rpn_head.rpn_conv)\n        self.assertTrue(rpn_head.rpn_cls)\n        self.assertTrue(rpn_head.rpn_reg)\n\n        # rpn_head.num_convs > 1\n        rpn_head = RPNHead(num_classes=1, in_channels=1, num_convs=2)\n        self.assertTrue(rpn_head.rpn_conv)\n        self.assertTrue(rpn_head.rpn_cls)\n        self.assertTrue(rpn_head.rpn_reg)\n\n    def test_rpn_head_loss(self):\n        \"\"\"Tests rpn head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.7,\n                    neg_iou_thr=0.3,\n                    min_pos_iou=0.3,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='RandomSampler',\n                    num=256,\n                    pos_fraction=0.5,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=False),\n                allowed_border=0,\n                pos_weight=-1,\n                debug=False))\n        rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)\n\n        # Anchor head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))\n            for i in range(len(rpn_head.prior_generator.strides)))\n        cls_scores, bbox_preds = rpn_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'rpn cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([0])\n\n        one_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,\n                                              [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n     
                      'rpn cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'rpn box loss should be non-zero')\n\n        # When there is no valid anchor, the loss will be None,\n        # and this will raise a ValueError.\n        img_metas = [{\n            'img_shape': (8, 8, 3),\n            'pad_shape': (8, 8, 3),\n            'scale_factor': 1,\n        }]\n        with pytest.raises(ValueError):\n            rpn_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],\n                                  img_metas)\n\n    def test_bbox_post_process(self):\n        \"\"\"Test the length of detection instance results is 0.\"\"\"\n        from mmengine.config import ConfigDict\n        cfg = ConfigDict(\n            nms_pre=1000,\n            max_per_img=1000,\n            nms=dict(type='nms', iou_threshold=0.7),\n            min_bbox_size=0)\n\n        rpn_head = RPNHead(num_classes=1, in_channels=1)\n        results = InstanceData(metainfo=dict())\n        results.bboxes = torch.zeros((0, 4))\n        results.scores = torch.zeros(0)\n        results = rpn_head._bbox_post_process(results, cfg, img_meta=dict())\n        self.assertEqual(len(results), 0)\n        self.assertEqual(results.bboxes.size(), (0, 4))\n        self.assertEqual(results.scores.size(), (0, ))\n        self.assertEqual(results.labels.size(), (0, ))\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_sabl_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import SABLRetinaHead\n\n\nclass TestSABLRetinaHead(TestCase):\n\n    def test_sabl_retina_head(self):\n        \"\"\"Tests sabl retina head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s),\n            'pad_shape': (s, s),\n            'scale_factor': [1, 1],\n        }]\n        train_cfg = ConfigDict(\n            dict(\n                assigner=dict(\n                    type='ApproxMaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.4,\n                    min_pos_iou=0.0,\n                    ignore_iof_thr=-1),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        sabl_retina_head = SABLRetinaHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            stacked_convs=1,\n            approx_anchor_generator=dict(\n                type='AnchorGenerator',\n                octave_base_scale=4,\n                scales_per_octave=3,\n                ratios=[0.5, 1.0, 2.0],\n                strides=[8, 16, 32, 64, 128]),\n            square_anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                scales=[4],\n                strides=[8, 16, 32, 64, 128]),\n            bbox_coder=dict(\n                type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_bbox_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),\n            loss_bbox_reg=dict(\n                type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),\n            train_cfg=train_cfg)\n\n        # Fcos head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in sabl_retina_head.square_anchor_generator.strides)\n        outs = sabl_retina_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = sabl_retina_head.loss_by_feat(\n            *outs, [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # box loss and centerness loss should be zero\n        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()\n        empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls']).item()\n        empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg']).item()\n        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_cls_loss, 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_box_reg_loss, 0,\n            'there should be no centerness loss when there are no true boxes')\n\n        # When truth is non-empty then all cls, box loss and centerness loss\n        # should be nonzero for random 
inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = sabl_retina_head.loss_by_feat(*outs, [gt_instances],\n                                                      img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()\n        onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls']).item()\n        onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg']).item()\n        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(onegt_box_cls_loss, 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_box_reg_loss, 0,\n                           'centerness loss should be non-zero')\n\n        test_cfg = ConfigDict(\n            dict(\n                nms_pre=1000,\n                min_bbox_size=0,\n                score_thr=0.05,\n                nms=dict(type='nms', iou_threshold=0.5),\n                max_per_img=100))\n        # test predict_by_feat\n        sabl_retina_head.predict_by_feat(\n            *outs, batch_img_metas=img_metas, cfg=test_cfg, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_solo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom parameterized import parameterized\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import (DecoupledSOLOHead,\n                                      DecoupledSOLOLightHead, SOLOHead)\nfrom mmdet.structures.mask import BitmapMasks\n\n\ndef _rand_masks(num_items, bboxes, img_w, img_h):\n    rng = np.random.RandomState(0)\n    masks = np.zeros((num_items, img_h, img_w))\n    for i, bbox in enumerate(bboxes):\n        bbox = bbox.astype(np.int32)\n        mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >\n                0.3).astype(np.int64)\n        masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask\n    return BitmapMasks(masks, height=img_h, width=img_w)\n\n\nclass TestSOLOHead(TestCase):\n\n    @parameterized.expand([(SOLOHead, ), (DecoupledSOLOHead, ),\n                           (DecoupledSOLOLightHead, )])\n    def test_mask_head_loss(self, MaskHead):\n        \"\"\"Tests mask head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'ori_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }]\n\n        mask_head = MaskHead(num_classes=4, in_channels=1)\n\n        # SOLO head expects a multiple levels of features per image\n        feats = []\n        for i in range(len(mask_head.strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))\n        feats = tuple(feats)\n\n        mask_outs = mask_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty(0, 4)\n        gt_instances.labels = torch.LongTensor([])\n        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n        empty_gt_losses = mask_head.loss_by_feat(\n            *mask_outs,\n            batch_gt_instances=[gt_instances],\n            batch_img_metas=img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_mask_loss = empty_gt_losses['loss_mask']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_mask_loss.item(), 0,\n            'there should be no mask loss when there are no true mask')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n        one_gt_losses = mask_head.loss_by_feat(\n            *mask_outs,\n            batch_gt_instances=[gt_instances],\n            batch_img_metas=img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_mask_loss = one_gt_losses['loss_mask']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_mask_loss.item(), 
0,\n                           'mask loss should be non-zero')\n\n    def test_solo_head_empty_result(self):\n        s = 256\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'ori_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }\n\n        mask_head = SOLOHead(num_classes=4, in_channels=1)\n\n        cls_scores = torch.empty(0, 80)\n        mask_preds = torch.empty(0, 16, 16)\n        test_cfg = ConfigDict(\n            score_thr=0.1,\n            mask_thr=0.5,\n        )\n        results = mask_head._predict_by_feat_single(\n            cls_scores=cls_scores,\n            mask_preds=mask_preds,\n            img_meta=img_metas,\n            cfg=test_cfg)\n\n        self.assertIsInstance(results, InstanceData)\n        self.assertEqual(len(results), 0)\n\n    def test_decoupled_solo_head_empty_result(self):\n        s = 256\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'ori_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }\n\n        mask_head = DecoupledSOLOHead(num_classes=4, in_channels=1)\n\n        cls_scores = torch.empty(0, 80)\n        mask_preds_x = torch.empty(0, 16, 16)\n        mask_preds_y = torch.empty(0, 16, 16)\n        test_cfg = ConfigDict(\n            score_thr=0.1,\n            mask_thr=0.5,\n        )\n        results = mask_head._predict_by_feat_single(\n            cls_scores=cls_scores,\n            mask_preds_x=mask_preds_x,\n            mask_preds_y=mask_preds_y,\n            img_meta=img_metas,\n            cfg=test_cfg)\n\n        self.assertIsInstance(results, InstanceData)\n        self.assertEqual(len(results), 0)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_solov2_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import SOLOV2Head\nfrom mmdet.structures.mask import BitmapMasks\n\n\ndef _rand_masks(num_items, bboxes, img_w, img_h):\n    rng = np.random.RandomState(0)\n    masks = np.zeros((num_items, img_h, img_w))\n    for i, bbox in enumerate(bboxes):\n        bbox = bbox.astype(np.int32)\n        mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >\n                0.3).astype(np.int64)\n        masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask\n    return BitmapMasks(masks, height=img_h, width=img_w)\n\n\ndef _fake_mask_feature_head():\n    mask_feature_head = ConfigDict(\n        feat_channels=128,\n        start_level=0,\n        end_level=3,\n        out_channels=256,\n        mask_stride=4,\n        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))\n    return mask_feature_head\n\n\nclass TestSOLOv2Head(TestCase):\n\n    def test_solov2_head_loss(self):\n        \"\"\"Tests mask head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'ori_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }]\n\n        mask_feature_head = _fake_mask_feature_head()\n\n        mask_head = SOLOV2Head(\n            num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)\n\n        # SOLO head expects a multiple levels of features per image\n        feats = []\n        for i in range(len(mask_head.strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))\n        feats = tuple(feats)\n\n        mask_outs = mask_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty(0, 4)\n        gt_instances.labels = torch.LongTensor([])\n        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n        empty_gt_losses = mask_head.loss_by_feat(\n            *mask_outs,\n            batch_gt_instances=[gt_instances],\n            batch_img_metas=img_metas)\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_mask_loss = empty_gt_losses['loss_mask']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_mask_loss.item(), 0,\n            'there should be no mask loss when there are no true mask')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n        one_gt_losses = mask_head.loss_by_feat(\n            *mask_outs,\n            batch_gt_instances=[gt_instances],\n            batch_img_metas=img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_mask_loss = one_gt_losses['loss_mask']\n       
 self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_mask_loss.item(), 0,\n                           'mask loss should be non-zero')\n\n    def test_solov2_head_empty_result(self):\n        s = 256\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'ori_shape': (s, s, 3),\n            'scale_factor': 1,\n            'batch_input_shape': (s, s, 3)\n        }\n\n        mask_feature_head = _fake_mask_feature_head()\n        mask_head = SOLOV2Head(\n            num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)\n\n        kernel_preds = torch.empty(0, 128)\n        cls_scores = torch.empty(0, 80)\n        mask_feats = torch.empty(0, 16, 16)\n        test_cfg = ConfigDict(\n            score_thr=0.1,\n            mask_thr=0.5,\n        )\n        results = mask_head._predict_by_feat_single(\n            kernel_preds=kernel_preds,\n            cls_scores=cls_scores,\n            mask_feats=mask_feats,\n            img_meta=img_metas,\n            cfg=test_cfg)\n\n        self.assertIsInstance(results, InstanceData)\n        self.assertEqual(len(results), 0)\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import ceil\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import SSDHead\n\n\nclass TestSSDHead(TestCase):\n\n    def test_ssd_head_loss(self):\n        \"\"\"Tests ssd head loss when truth is empty and non-empty.\"\"\"\n        s = 300\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        cfg = Config(\n            dict(\n                assigner=dict(\n                    type='MaxIoUAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.5,\n                    min_pos_iou=0.,\n                    ignore_iof_thr=-1,\n                    gt_max_assign_all=False),\n                sampler=dict(type='PseudoSampler'),\n                smoothl1_beta=1.,\n                allowed_border=-1,\n                pos_weight=-1,\n                neg_pos_ratio=3,\n                debug=False))\n        ssd_head = SSDHead(\n            num_classes=4,\n            in_channels=(1, 1, 1, 1, 1, 1),\n            stacked_convs=1,\n            feat_channels=1,\n            use_depthwise=True,\n            anchor_generator=dict(\n                type='SSDAnchorGenerator',\n                scale_major=False,\n                input_size=s,\n                basesize_ratio_range=(0.15, 0.9),\n                strides=[8, 16, 32, 64, 100, 300],\n                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),\n            train_cfg=cfg)\n\n        # SSD head expects a multiple levels of features per image\n        feats = (\n            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n            for stride in ssd_head.prior_generator.strides)\n        cls_scores, bbox_preds = ssd_head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas)\n        # When there is no truth, cls_loss and box_loss should all be zero.\n        empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n        empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n        self.assertEqual(\n            empty_cls_loss.item(), 0,\n            'there should be no cls loss when there are no true boxes')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,\n                                              [gt_instances], img_metas)\n        onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n        onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n       
                    'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_tood_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config, MessageHub\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import TOODHead\n\n\ndef _tood_head(anchor_type):\n    \"\"\"Set type of tood head.\"\"\"\n    train_cfg = Config(\n        dict(\n            initial_epoch=4,\n            initial_assigner=dict(type='ATSSAssigner', topk=9),\n            assigner=dict(type='TaskAlignedAssigner', topk=13),\n            alpha=1,\n            beta=6,\n            allowed_border=-1,\n            pos_weight=-1,\n            debug=False))\n    test_cfg = Config(\n        dict(\n            nms_pre=1000,\n            min_bbox_size=0,\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.6),\n            max_per_img=100))\n\n    tood_head = TOODHead(\n        num_classes=80,\n        in_channels=1,\n        stacked_convs=1,\n        feat_channels=8,  # the same as `la_down_rate` in TaskDecomposition\n        norm_cfg=None,\n        anchor_type=anchor_type,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            octave_base_scale=8,\n            scales_per_octave=1,\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[0.1, 0.1, 0.2, 0.2]),\n        initial_loss_cls=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            activated=True,  # use probability instead of logit as input\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_cls=dict(\n            type='QualityFocalLoss',\n            use_sigmoid=True,\n            activated=True,  # use probability instead of logit as input\n            beta=2.0,\n            loss_weight=1.0),\n        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n        train_cfg=train_cfg,\n        test_cfg=test_cfg)\n    return tood_head\n\n\nclass TestTOODHead(TestCase):\n\n    def test_tood_head_anchor_free_loss(self):\n        \"\"\"Tests tood head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        tood_head = _tood_head('anchor_free')\n        tood_head.init_weights()\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [8, 16, 32, 64, 128]\n        ]\n        cls_scores, bbox_preds = tood_head(feat)\n\n        message_hub = MessageHub.get_instance('runtime_info')\n        message_hub.update_info('epoch', 0)\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas,\n                                                 gt_bboxes_ignore)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(\n          
  sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            sum(empty_box_loss).item(), 0,\n            'there should be no box loss when there are no true boxes')\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_bboxes_ignore = None\n        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,\n                                               [gt_instances], img_metas,\n                                               gt_bboxes_ignore)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(\n            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')\n        self.assertGreater(\n            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas,\n                                                 gt_bboxes_ignore)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(\n            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            sum(empty_box_loss).item(), 0,\n            'there should be no box loss when there are no true boxes')\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        gt_bboxes_ignore = None\n        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,\n                                               [gt_instances], img_metas,\n                                               gt_bboxes_ignore)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(\n            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')\n        self.assertGreater(\n            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')\n\n    def test_tood_head_anchor_based_loss(self):\n        \"\"\"Tests tood head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'pad_shape': (s, s, 3),\n            'scale_factor': 1\n        }]\n        tood_head = _tood_head('anchor_based')\n        tood_head.init_weights()\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [8, 16, 32, 64, 128]\n        ]\n        cls_scores, bbox_preds = tood_head(feat)\n\n        message_hub = MessageHub.get_instance('runtime_info')\n        message_hub.update_info('epoch', 
0)\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        gt_bboxes_ignore = None\n        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,\n                                                 [gt_instances], img_metas,\n                                                 gt_bboxes_ignore)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(\n            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')\n        self.assertEqual(\n            sum(empty_box_loss).item(), 0,\n            'there should be no box loss when there are no true boxes')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_vfnet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import VFNetHead\n\n\nclass TestVFNetHead(TestCase):\n\n    def test_vfnet_head_loss(self):\n        \"\"\"Tests vfnet head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'pad_shape': (s, s, 3)\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        # since VarFocal Loss is not supported on CPU\n        vfnet_head = VFNetHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            loss_cls=dict(\n                type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))\n\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        cls_scores, bbox_preds, bbox_preds_refine = vfnet_head.forward(feat)\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        empty_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                  bbox_preds_refine,\n                                                  [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                bbox_preds_refine,\n                                                [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n\n    def test_vfnet_head_loss_without_atss(self):\n        \"\"\"Tests vfnet head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'pad_shape': (s, s, 3)\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(type='ATSSAssigner', topk=9),\n                allowed_border=-1,\n                pos_weight=-1,\n                
debug=False))\n        # since VarFocal Loss is not supported on CPU\n        vfnet_head = VFNetHead(\n            num_classes=4,\n            in_channels=1,\n            train_cfg=train_cfg,\n            use_atss=False,\n            loss_cls=dict(\n                type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))\n\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16, 32, 64]\n        ]\n        cls_scores, bbox_preds, bbox_preds_refine = vfnet_head.forward(feat)\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        empty_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                  bbox_preds_refine,\n                                                  [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,\n                                                bbox_preds_refine,\n                                                [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_yolo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.dense_heads import YOLOV3Head\n\n\nclass TestYOLOV3Head(TestCase):\n\n    def test_yolo_head_loss(self):\n        \"\"\"Tests YOLO head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        head = YOLOV3Head(\n            num_classes=4,\n            in_channels=[1, 1, 1],\n            out_channels=[1, 1, 1],\n            train_cfg=Config(\n                dict(\n                    assigner=dict(\n                        type='GridAssigner',\n                        pos_iou_thr=0.5,\n                        neg_iou_thr=0.5,\n                        min_pos_iou=0))))\n        head.init_weights()\n\n        # YOLO head expects a multiple levels of features per image\n        feats = [\n            torch.rand(1, 1, s // stride[1], s // stride[0])\n            for stride in head.prior_generator.strides\n        ]\n        predmaps, = head.forward(feats)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n\n        empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],\n                                            img_metas)\n        # When there is no truth, the conf loss should be nonzero but\n        # cls loss and xy&wh loss should be zero\n        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()\n        empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()\n        empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()\n        empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()\n        self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')\n        self.assertEqual(\n            empty_cls_loss, 0,\n            'there should be no cls loss when there are no true boxes')\n        self.assertEqual(\n            empty_xy_loss, 0,\n            'there should be no xy loss when there are no true boxes')\n        self.assertEqual(\n            empty_wh_loss, 0,\n            'there should be no wh loss when there are no true boxes')\n\n        # When truth is non-empty then all conf, cls loss and xywh loss\n        # should be nonzero for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n\n        one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)\n        one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()\n        one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()\n        one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()\n        one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()\n        self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')\n        self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')\n        self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')\n        self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_yolof_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import Config\nfrom mmengine.structures import InstanceData\n\nfrom mmdet import *  # noqa\nfrom mmdet.models.dense_heads import YOLOFHead\n\n\nclass TestYOLOFHead(TestCase):\n\n    def test_yolof_head_loss(self):\n        \"\"\"Tests yolof head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n            'pad_shape': (s, s, 3)\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(\n                    type='UniformAssigner',\n                    pos_ignore_thr=0.15,\n                    neg_ignore_thr=0.7),\n                allowed_border=-1,\n                pos_weight=-1,\n                debug=False))\n        yolof_head = YOLOFHead(\n            num_classes=4,\n            in_channels=1,\n            feat_channels=1,\n            reg_decoded_bbox=True,\n            train_cfg=train_cfg,\n            anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                scales=[1, 2, 4, 8, 16],\n                strides=[32]),\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[.0, .0, .0, .0],\n                target_stds=[1., 1., 1., 1.],\n                add_ctr_clamp=True,\n                ctr_clamp=32),\n            loss_cls=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_bbox=dict(type='GIoULoss', loss_weight=1.0))\n        feat = [torch.rand(1, 1, s // 32, s // 32)]\n        cls_scores, bbox_preds = yolof_head.forward(feat)\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        empty_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,\n                                                  [gt_instances], img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls']\n        empty_box_loss = empty_gt_losses['loss_bbox']\n        self.assertGreater(empty_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        one_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,\n                                                [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls']\n        onegt_box_loss = one_gt_losses['loss_bbox']\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_dense_heads/test_yolox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmengine.config import Config\nfrom mmengine.model import bias_init_with_prob\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.dense_heads import YOLOXHead\n\n\nclass TestYOLOXHead(TestCase):\n\n    def test_init_weights(self):\n        head = YOLOXHead(\n            num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False)\n        head.init_weights()\n        bias_init = bias_init_with_prob(0.01)\n        for conv_cls, conv_obj in zip(head.multi_level_conv_cls,\n                                      head.multi_level_conv_obj):\n            assert_allclose(conv_cls.bias.data,\n                            torch.ones_like(conv_cls.bias.data) * bias_init)\n            assert_allclose(conv_obj.bias.data,\n                            torch.ones_like(conv_obj.bias.data) * bias_init)\n\n    def test_predict_by_feat(self):\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': (1.0, 1.0),\n        }]\n        test_cfg = Config(\n            dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))\n        head = YOLOXHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            use_depthwise=False,\n            test_cfg=test_cfg)\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16]\n        ]\n        cls_scores, bbox_preds, objectnesses = head.forward(feat)\n        head.predict_by_feat(\n            cls_scores,\n            bbox_preds,\n            objectnesses,\n            img_metas,\n            cfg=test_cfg,\n            rescale=True,\n            with_nms=True)\n        head.predict_by_feat(\n            cls_scores,\n            bbox_preds,\n            objectnesses,\n            img_metas,\n            cfg=test_cfg,\n            rescale=False,\n            with_nms=False)\n\n    def test_loss_by_feat(self):\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        train_cfg = Config(\n            dict(\n                assigner=dict(\n                    type='SimOTAAssigner',\n                    center_radius=2.5,\n                    candidate_topk=10,\n                    iou_weight=3.0,\n                    cls_weight=1.0)))\n        head = YOLOXHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            use_depthwise=False,\n            train_cfg=train_cfg)\n        assert not head.use_l1\n        assert isinstance(head.multi_level_cls_convs[0][0], ConvModule)\n\n        feat = [\n            torch.rand(1, 1, s // feat_size, s // feat_size)\n            for feat_size in [4, 8, 16]\n        ]\n        cls_scores, bbox_preds, objectnesses = head.forward(feat)\n\n        # Test that empty ground truth encourages the network to predict\n        # background\n        gt_instances = InstanceData(\n            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))\n\n        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,\n                                            objectnesses, [gt_instances],\n                                            img_metas)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should 
be no box loss.\n        empty_cls_loss = empty_gt_losses['loss_cls'].sum()\n        empty_box_loss = empty_gt_losses['loss_bbox'].sum()\n        empty_obj_loss = empty_gt_losses['loss_obj'].sum()\n        self.assertEqual(\n            empty_cls_loss.item(), 0,\n            'there should be no cls loss when there are no true boxes')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertGreater(empty_obj_loss.item(), 0,\n                           'objectness loss should be non-zero')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        head = YOLOXHead(\n            num_classes=4,\n            in_channels=1,\n            stacked_convs=1,\n            use_depthwise=True,\n            train_cfg=train_cfg)\n        assert isinstance(head.multi_level_cls_convs[0][0],\n                          DepthwiseSeparableConvModule)\n        head.use_l1 = True\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),\n            labels=torch.LongTensor([2]))\n\n        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,\n                                          [gt_instances], img_metas)\n        onegt_cls_loss = one_gt_losses['loss_cls'].sum()\n        onegt_box_loss = one_gt_losses['loss_bbox'].sum()\n        onegt_obj_loss = one_gt_losses['loss_obj'].sum()\n        onegt_l1_loss = one_gt_losses['loss_l1'].sum()\n        self.assertGreater(onegt_cls_loss.item(), 0,\n                           'cls loss should be non-zero')\n        self.assertGreater(onegt_box_loss.item(), 0,\n                           'box loss should be non-zero')\n        self.assertGreater(onegt_obj_loss.item(), 0,\n                           'obj loss should be non-zero')\n        self.assertGreater(onegt_l1_loss.item(), 0,\n                           'l1 loss should be non-zero')\n\n        # Test ground truth out of bounds\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]]),\n            labels=torch.LongTensor([2]))\n        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,\n                                            objectnesses, [gt_instances],\n                                            img_metas)\n        # When gt_bboxes are out of bounds, the assign results should be empty,\n        # so the cls and bbox loss should be zero.\n        empty_cls_loss = empty_gt_losses['loss_cls'].sum()\n        empty_box_loss = empty_gt_losses['loss_bbox'].sum()\n        empty_obj_loss = empty_gt_losses['loss_obj'].sum()\n        self.assertEqual(\n            empty_cls_loss.item(), 0,\n            'there should be no cls loss when gt_bboxes are out of bounds')\n        self.assertEqual(\n            empty_box_loss.item(), 0,\n            'there should be no box loss when gt_bboxes are out of bounds')\n        self.assertGreater(empty_obj_loss.item(), 0,\n                           'objectness loss should be non-zero')\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_conditional_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestConditionalDETR(TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n    def test_conditional_detr_head_loss(self):\n        \"\"\"Tests transformer head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        metainfo = {\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }\n        img_metas = DetDataSample()\n        img_metas.set_metainfo(metainfo)\n        batch_data_samples = []\n        batch_data_samples.append(img_metas)\n\n        config = get_detector_cfg(\n            'conditional_detr/conditional-detr_r50_8xb2-50e_coco.py')\n\n        model = MODELS.build(config)\n        model.init_weights()\n        random_image = torch.rand(1, 3, s, s)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples1 = []\n        batch_data_samples1.append(img_metas)\n        empty_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples1)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        for key, loss in empty_gt_losses.items():\n            if 'cls' in key:\n                self.assertGreater(loss.item(), 0,\n                                   'cls loss should be non-zero')\n            elif 'bbox' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no box loss when no ground true boxes')\n            elif 'iou' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no iou loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples2 = []\n        batch_data_samples2.append(img_metas)\n        one_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples2)\n        for loss in one_gt_losses.values():\n            self.assertGreater(\n                loss.item(), 0,\n                'cls loss, or box loss, or iou loss should be non-zero')\n\n        model.eval()\n        # test _forward\n        model._forward(random_image, batch_data_samples=batch_data_samples2)\n        # test only predict\n        model.predict(\n            random_image, batch_data_samples=batch_data_samples2, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_cornernet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestCornerNet(TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n        model_cfg = get_detector_cfg(\n            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')\n\n        backbone = dict(\n            type='ResNet',\n            depth=18,\n            num_stages=4,\n            out_indices=(3, ),\n            norm_cfg=dict(type='BN', requires_grad=True),\n            norm_eval=True,\n            style='pytorch')\n\n        neck = dict(\n            type='FPN',\n            in_channels=[512],\n            out_channels=256,\n            start_level=0,\n            add_extra_convs='on_input',\n            num_outs=1)\n\n        model_cfg.backbone = ConfigDict(**backbone)\n        model_cfg.neck = ConfigDict(**neck)\n        model_cfg.bbox_head.num_feat_levels = 1\n        self.model_cfg = model_cfg\n\n    def test_init(self):\n        model = get_detector_cfg(\n            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.bbox_head is not None)\n        self.assertTrue(detector.backbone is not None)\n        self.assertTrue(not hasattr(detector, 'neck'))\n\n    @unittest.skipIf(not torch.cuda.is_available(),\n                     'test requires GPU and torch+cuda')\n    def test_cornernet_forward_loss_mode(self):\n        from mmdet.registry import MODELS\n        detector = MODELS.build(self.model_cfg)\n        detector.init_weights()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])\n        data = detector.data_preprocessor(packed_inputs, True)\n        losses = detector.forward(**data, mode='loss')\n        assert isinstance(losses, dict)\n\n    @unittest.skipIf(not torch.cuda.is_available(),\n                     'test requires GPU and torch+cuda')\n    def test_cornernet_forward_predict_mode(self):\n        from mmdet.registry import MODELS\n        detector = MODELS.build(self.model_cfg)\n        detector.init_weights()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])\n        data = detector.data_preprocessor(packed_inputs, False)\n\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            batch_results = detector.forward(**data, mode='predict')\n            assert len(batch_results) == 2\n            assert isinstance(batch_results[0], DetDataSample)\n\n    @unittest.skipIf(not torch.cuda.is_available(),\n                     'test requires GPU and torch+cuda')\n    def test_cornernet_forward_tensor_mode(self):\n        from mmdet.registry import MODELS\n        detector = MODELS.build(self.model_cfg)\n        detector.init_weights()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])\n        data = detector.data_preprocessor(packed_inputs, False)\n        batch_results = detector.forward(**data, mode='tensor')\n        assert isinstance(batch_results, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_dab_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDABDETR(TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n    def test_dab_detr_head_loss(self):\n        \"\"\"Tests transformer head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        metainfo = {\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }\n        img_metas = DetDataSample()\n        img_metas.set_metainfo(metainfo)\n        batch_data_samples = []\n        batch_data_samples.append(img_metas)\n\n        config = get_detector_cfg('dab_detr/dab-detr_r50_8xb2-50e_coco.py')\n\n        model = MODELS.build(config)\n        model.init_weights()\n        random_image = torch.rand(1, 3, s, s)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples1 = []\n        batch_data_samples1.append(img_metas)\n        empty_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples1)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        for key, loss in empty_gt_losses.items():\n            if 'cls' in key:\n                self.assertGreater(loss.item(), 0,\n                                   'cls loss should be non-zero')\n            elif 'bbox' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no box loss when no ground true boxes')\n            elif 'iou' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no iou loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples2 = []\n        batch_data_samples2.append(img_metas)\n        one_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples2)\n        for loss in one_gt_losses.values():\n            self.assertGreater(\n                loss.item(), 0,\n                'cls loss, or box loss, or iou loss should be non-zero')\n\n        model.eval()\n        # test _forward\n        model._forward(random_image, batch_data_samples=batch_data_samples2)\n        # test only predict\n        model.predict(\n            random_image, batch_data_samples=batch_data_samples2, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_deformable_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDeformableDETR(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def test_deformable_detr_head_loss(self):\n        \"\"\"Tests transformer head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        metainfo = {\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }\n        img_metas = DetDataSample()\n        img_metas.set_metainfo(metainfo)\n        batch_data_samples = []\n        batch_data_samples.append(img_metas)\n\n        configs = [\n            get_detector_cfg(\n                'deformable_detr/deformable-detr_r50_16xb2-50e_coco.py'),\n            get_detector_cfg(\n                'deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py'  # noqa\n            ),\n            get_detector_cfg(\n                'deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py'  # noqa\n            )\n        ]\n\n        for config in configs:\n            model = MODELS.build(config)\n            model.init_weights()\n            random_image = torch.rand(1, 3, s, s)\n\n            # Test that empty ground truth encourages the network to\n            # predict background\n            gt_instances = InstanceData()\n            gt_instances.bboxes = torch.empty((0, 4))\n            gt_instances.labels = torch.LongTensor([])\n            img_metas.gt_instances = gt_instances\n            batch_data_samples1 = []\n            batch_data_samples1.append(img_metas)\n            empty_gt_losses = model.loss(\n                random_image, batch_data_samples=batch_data_samples1)\n            # When there is no truth, the cls loss should be nonzero but there\n            # should be no box loss.\n            for key, loss in empty_gt_losses.items():\n                if 'cls' in key:\n                    self.assertGreater(loss.item(), 0,\n                                       'cls loss should be non-zero')\n                elif 'bbox' in key:\n                    self.assertEqual(\n                        loss.item(), 0,\n                        'there should be no box loss when no ground true boxes'\n                    )\n                elif 'iou' in key:\n                    self.assertEqual(\n                        loss.item(), 0,\n                        'there should be no iou loss when no ground true boxes'\n                    )\n\n            # When truth is non-empty then both cls and box loss should\n            # be nonzero for random inputs\n            gt_instances = InstanceData()\n            gt_instances.bboxes = torch.Tensor(\n                [[23.6667, 23.8757, 238.6326, 151.8874]])\n            gt_instances.labels = torch.LongTensor([2])\n            img_metas.gt_instances = gt_instances\n            batch_data_samples2 = []\n            batch_data_samples2.append(img_metas)\n            one_gt_losses = model.loss(\n                random_image, batch_data_samples=batch_data_samples2)\n            for loss in one_gt_losses.values():\n                self.assertGreater(\n                    loss.item(), 0,\n                    'cls loss, or box loss, or iou loss should be 
non-zero')\n\n            model.eval()\n            # test _forward\n            model._forward(\n                random_image, batch_data_samples=batch_data_samples2)\n            # test only predict\n            model.predict(\n                random_image,\n                batch_data_samples=batch_data_samples2,\n                rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDETR(TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n    def test_detr_head_loss(self):\n        \"\"\"Tests transformer head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        metainfo = {\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }\n        img_metas = DetDataSample()\n        img_metas.set_metainfo(metainfo)\n        batch_data_samples = []\n        batch_data_samples.append(img_metas)\n\n        config = get_detector_cfg('detr/detr_r50_8xb2-150e_coco.py')\n\n        model = MODELS.build(config)\n        model.init_weights()\n        random_image = torch.rand(1, 3, s, s)\n\n        # Test that empty ground truth encourages the network to\n        # predict background\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4))\n        gt_instances.labels = torch.LongTensor([])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples1 = []\n        batch_data_samples1.append(img_metas)\n        empty_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples1)\n        # When there is no truth, the cls loss should be nonzero but there\n        # should be no box loss.\n        for key, loss in empty_gt_losses.items():\n            if 'cls' in key:\n                self.assertGreater(loss.item(), 0,\n                                   'cls loss should be non-zero')\n            elif 'bbox' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no box loss when no ground true boxes')\n            elif 'iou' in key:\n                self.assertEqual(\n                    loss.item(), 0,\n                    'there should be no iou loss when there are no true boxes')\n\n        # When truth is non-empty then both cls and box loss should be nonzero\n        # for random inputs\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.Tensor(\n            [[23.6667, 23.8757, 238.6326, 151.8874]])\n        gt_instances.labels = torch.LongTensor([2])\n        img_metas.gt_instances = gt_instances\n        batch_data_samples2 = []\n        batch_data_samples2.append(img_metas)\n        one_gt_losses = model.loss(\n            random_image, batch_data_samples=batch_data_samples2)\n        for loss in one_gt_losses.values():\n            self.assertGreater(\n                loss.item(), 0,\n                'cls loss, or box loss, or iou loss should be non-zero')\n\n        model.eval()\n        # test _forward\n        model._forward(random_image, batch_data_samples=batch_data_samples2)\n        # test only predict\n        model.predict(\n            random_image, batch_data_samples=batch_data_samples2, rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_dino.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDINO(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def test_dino_head_loss(self):\n        \"\"\"Tests transformer head loss when truth is empty and non-empty.\"\"\"\n        s = 256\n        metainfo = {\n            'img_shape': (s, s),\n            'scale_factor': (1, 1),\n            'pad_shape': (s, s),\n            'batch_input_shape': (s, s)\n        }\n        data_sample = DetDataSample()\n        data_sample.set_metainfo(metainfo)\n\n        configs = [get_detector_cfg('dino/dino-4scale_r50_8xb2-12e_coco.py')]\n\n        for config in configs:\n            model = MODELS.build(config)\n            model.init_weights()\n            random_image = torch.rand(1, 3, s, s)\n\n            # Test that empty ground truth encourages the network to\n            # predict background\n            gt_instances = InstanceData()\n            gt_instances.bboxes = torch.empty((0, 4))\n            gt_instances.labels = torch.LongTensor([])\n            data_sample.gt_instances = gt_instances\n            batch_data_samples_1 = [data_sample]\n            empty_gt_losses = model.loss(\n                random_image, batch_data_samples=batch_data_samples_1)\n            # When there is no truth, the cls loss should be nonzero but there\n            # should be no box loss.\n            for key, loss in empty_gt_losses.items():\n                _loss = loss.item()\n                if 'bbox' in key or 'iou' in key or 'dn' in key:\n                    self.assertEqual(\n                        _loss, 0, f'there should be no {key}({_loss}) '\n                        f'when no ground true boxes')\n                elif 'cls' in key:\n                    self.assertGreater(_loss, 0,\n                                       f'{key}({_loss}) should be non-zero')\n\n            # When truth is non-empty then both cls and box loss should\n            # be nonzero for random inputs\n            gt_instances = InstanceData()\n            gt_instances.bboxes = torch.Tensor(\n                [[23.6667, 23.8757, 238.6326, 151.8874]])\n            gt_instances.labels = torch.LongTensor([2])\n            data_sample.gt_instances = gt_instances\n            batch_data_samples_2 = [data_sample]\n            one_gt_losses = model.loss(\n                random_image, batch_data_samples=batch_data_samples_2)\n            for loss in one_gt_losses.values():\n                self.assertGreater(\n                    loss.item(), 0,\n                    'cls loss, or box loss, or iou loss should be non-zero')\n\n            model.eval()\n            # test _forward\n            model._forward(\n                random_image, batch_data_samples=batch_data_samples_2)\n            # test only predict\n            model.predict(\n                random_image,\n                batch_data_samples=batch_data_samples_2,\n                rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_kd_single_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet import *  # noqa\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestKDSingleStageDetector(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand(['ld/ld_r18-gflv1-r101_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.bbox_head)\n\n    @parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',\n                                                                    'cuda'))])\n    def test_single_stage_forward_train(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, True)\n            # Test forward train\n            losses = detector.forward(**data, mode='loss')\n            self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',\n                                                                    'cuda'))])\n    def test_single_stage_forward_test(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n\n            # Test forward test\n            detector.eval()\n            with torch.no_grad():\n                batch_results = detector.forward(**data, mode='predict')\n                self.assertEqual(len(batch_results), 2)\n                self.assertIsInstance(batch_results[0], DetDataSample)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_maskformer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing._utils import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestMaskFormer(unittest.TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def _create_model_cfg(self):\n        cfg_path = 'maskformer/maskformer_r50_ms-16xb1-75e_coco.py'\n        model_cfg = get_detector_cfg(cfg_path)\n        base_channels = 32\n        model_cfg.backbone.depth = 18\n        model_cfg.backbone.init_cfg = None\n        model_cfg.backbone.base_channels = base_channels\n        model_cfg.panoptic_head.in_channels = [\n            base_channels * 2**i for i in range(4)\n        ]\n        model_cfg.panoptic_head.feat_channels = base_channels\n        model_cfg.panoptic_head.out_channels = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.self_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.ffn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8\n        model_cfg.panoptic_head.pixel_decoder.\\\n            positional_encoding.num_feats = base_channels // 2\n        model_cfg.panoptic_head.positional_encoding.\\\n            num_feats = base_channels // 2\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.self_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder. \\\n            layer_cfg.cross_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.ffn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8\n        return model_cfg\n\n    def test_init(self):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        detector.init_weights()\n        assert detector.backbone\n        assert detector.panoptic_head\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_loss_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, True)\n        # Test loss mode\n        losses = detector.forward(**data, mode='loss')\n        self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_predict_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 
91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, False)\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            batch_results = detector.forward(**data, mode='predict')\n            self.assertEqual(len(batch_results), 2)\n            self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_tensor_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 125, 130]],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, False)\n        out = detector.forward(**data, mode='tensor')\n        self.assertIsInstance(out, tuple)\n\n\nclass TestMask2Former(unittest.TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def _create_model_cfg(self, cfg_path):\n        model_cfg = get_detector_cfg(cfg_path)\n        base_channels = 32\n        model_cfg.backbone.depth = 18\n        model_cfg.backbone.init_cfg = None\n        model_cfg.backbone.base_channels = base_channels\n        model_cfg.panoptic_head.in_channels = [\n            base_channels * 2**i for i in range(4)\n        ]\n        model_cfg.panoptic_head.feat_channels = base_channels\n        model_cfg.panoptic_head.out_channels = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.self_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.ffn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.pixel_decoder.encoder.\\\n            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 4\n        model_cfg.panoptic_head.pixel_decoder.\\\n            positional_encoding.num_feats = base_channels // 2\n        model_cfg.panoptic_head.positional_encoding.\\\n            num_feats = base_channels // 2\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.self_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder. 
\\\n            layer_cfg.cross_attn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.ffn_cfg.embed_dims = base_channels\n        model_cfg.panoptic_head.transformer_decoder.\\\n            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8\n\n        return model_cfg\n\n    def test_init(self):\n        model_cfg = self._create_model_cfg(\n            'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py')\n        detector = MODELS.build(model_cfg)\n        detector.init_weights()\n        assert detector.backbone\n        assert detector.panoptic_head\n\n    @parameterized.expand([\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')\n    ])\n    def test_forward_loss_mode(self, device, cfg_path):\n        print(device, cfg_path)\n        with_semantic = 'panoptic' in cfg_path\n        model_cfg = self._create_model_cfg(cfg_path)\n        detector = MODELS.build(model_cfg)\n\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=with_semantic)\n        data = detector.data_preprocessor(packed_inputs, True)\n        # Test loss mode\n        losses = detector.forward(**data, mode='loss')\n        self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')\n    ])\n    def test_forward_predict_mode(self, device, cfg_path):\n        with_semantic = 'panoptic' in cfg_path\n        model_cfg = self._create_model_cfg(cfg_path)\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=with_semantic)\n        data = detector.data_preprocessor(packed_inputs, False)\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            batch_results = detector.forward(**data, mode='predict')\n            self.assertEqual(len(batch_results), 2)\n            self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),\n        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')\n    ])\n    def test_forward_tensor_mode(self, device, cfg_path):\n        with_semantic = 'panoptic' in cfg_path\n        model_cfg = 
self._create_model_cfg(cfg_path)\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 125, 130]],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=with_semantic)\n        data = detector.data_preprocessor(packed_inputs, False)\n        out = detector.forward(**data, mode='tensor')\n        self.assertIsInstance(out, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_panoptic_two_stage_segmentor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing._utils import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestTwoStagePanopticSegmentor(unittest.TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def _create_model_cfg(self):\n        cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'\n        model_cfg = get_detector_cfg(cfg_file)\n        model_cfg.backbone.depth = 18\n        model_cfg.neck.in_channels = [64, 128, 256, 512]\n        model_cfg.backbone.init_cfg = None\n        return model_cfg\n\n    def test_init(self):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        assert detector.backbone\n        assert detector.neck\n        assert detector.rpn_head\n        assert detector.roi_head\n        assert detector.roi_head.mask_head\n        assert detector.with_semantic_head\n        assert detector.with_panoptic_fusion_head\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_loss_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, True)\n        # Test loss mode\n        losses = detector.forward(**data, mode='loss')\n        self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_predict_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n        packed_inputs = demo_mm_inputs(\n            2,\n            image_shapes=[(3, 128, 127), (3, 91, 92)],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, False)\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            batch_results = detector.forward(**data, mode='predict')\n            self.assertEqual(len(batch_results), 2)\n            self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([('cpu', ), ('cuda', )])\n    def test_forward_tensor_mode(self, device):\n        model_cfg = self._create_model_cfg()\n        detector = MODELS.build(model_cfg)\n        if device == 'cuda' and not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.to(device)\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 125, 130]],\n            sem_seg_output_strides=1,\n            with_mask=True,\n            with_semantic=True)\n        data = detector.data_preprocessor(packed_inputs, False)\n        out = detector.forward(**data, mode='tensor')\n        
self.assertIsInstance(out, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_rpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestRPN(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.bbox_head)\n\n        # if rpn.num_classes > 1, force set rpn.num_classes = 1\n        model.rpn_head.num_classes = 2\n        detector = MODELS.build(model)\n        self.assertEqual(detector.bbox_head.num_classes, 1)\n\n    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])\n    def test_rpn_forward_loss_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, True)\n            # Test forward train\n            losses = detector.forward(**data, mode='loss')\n            self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])\n    def test_rpn_forward_predict_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n            # Test forward test\n            detector.eval()\n            with torch.no_grad():\n                batch_results = detector.forward(**data, mode='predict')\n                self.assertEqual(len(batch_results), 2)\n                self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])\n    def test_rpn_forward_tensor_mode(self, cfg_file, devices):\n        
model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n            batch_results = detector.forward(**data, mode='tensor')\n            self.assertIsInstance(batch_results, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_semi_base.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nfrom mmengine.registry import MODELS\nfrom parameterized import parameterized\n\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\nclass TestSemiBase(TestCase):\n\n    @parameterized.expand([\n        'soft_teacher/'\n        'soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py',\n    ])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.detector.backbone.depth = 18\n        model.detector.neck.in_channels = [64, 128, 256, 512]\n        model.detector.backbone.init_cfg = None\n\n        model = MODELS.build(model)\n        self.assertTrue(model.teacher.backbone)\n        self.assertTrue(model.teacher.neck)\n        self.assertTrue(model.teacher.rpn_head)\n        self.assertTrue(model.teacher.roi_head)\n        self.assertTrue(model.student.backbone)\n        self.assertTrue(model.student.neck)\n        self.assertTrue(model.student.rpn_head)\n        self.assertTrue(model.student.roi_head)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_single_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport time\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.logging import MessageHub\nfrom parameterized import parameterized\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestSingleStageDetector(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand([\n        'retinanet/retinanet_r18_fpn_1x_coco.py',\n        'centernet/centernet_r18_8xb16-crop512-140e_coco.py',\n        'fsaf/fsaf_r50_fpn_1x_coco.py',\n        'yolox/yolox_tiny_8xb8-300e_coco.py',\n        'yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py',\n        'reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py',\n    ])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.bbox_head)\n\n    @parameterized.expand([\n        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',\n                                                                'cuda')),\n        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),\n        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),\n        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',\n                                                                      'cuda')),\n    ])\n    def test_single_stage_forward_loss_mode(self, cfg_file, devices):\n        message_hub = MessageHub.get_instance(\n            f'test_single_stage_forward_loss_mode-{time.time()}')\n        message_hub.update_info('iter', 0)\n        message_hub.update_info('epoch', 0)\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n            detector.init_weights()\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, True)\n            losses = detector.forward(**data, mode='loss')\n            self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([\n        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',\n                                                                'cuda')),\n        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),\n        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),\n        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',\n                                                                      'cuda')),\n    ])\n    def test_single_stage_forward_predict_mode(self, cfg_file, devices):\n     
   model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n            # Test forward test\n            detector.eval()\n            with torch.no_grad():\n                batch_results = detector.forward(**data, mode='predict')\n                self.assertEqual(len(batch_results), 2)\n                self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([\n        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',\n                                                                'cuda')),\n        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),\n        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),\n        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',\n                                                                      'cuda')),\n    ])\n    def test_single_stage_forward_tensor_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n            batch_results = detector.forward(**data, mode='tensor')\n            self.assertIsInstance(batch_results, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_single_stage_instance_seg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestSingleStageInstanceSegmentor(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand([\n        'solo/solo_r50_fpn_1x_coco.py',\n        'solo/decoupled-solo_r50_fpn_1x_coco.py',\n        'solo/decoupled-solo-light_r50_fpn_3x_coco.py',\n        'solov2/solov2_r50_fpn_1x_coco.py',\n        'solov2/solov2-light_r18_fpn_ms-3x_coco.py',\n        'yolact/yolact_r50_1xb8-55e_coco.py',\n    ])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.mask_head)\n        if detector.with_bbox:\n            self.assertTrue(detector.bbox_head)\n\n    @parameterized.expand([\n        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),\n        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),\n    ])\n    def test_single_stage_forward_loss_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n            detector.init_weights()\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(\n                2, [[3, 128, 128], [3, 125, 130]], with_mask=True)\n            data = detector.data_preprocessor(packed_inputs, True)\n            losses = detector.forward(**data, mode='loss')\n            self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([\n        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),\n        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),\n    ])\n    def test_single_stage_forward_predict_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = 
MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(\n                2, [[3, 128, 128], [3, 125, 130]], with_mask=True)\n            data = detector.data_preprocessor(packed_inputs, False)\n            # Test forward test\n            detector.eval()\n            with torch.no_grad():\n                batch_results = detector.forward(**data, mode='predict')\n                self.assertEqual(len(batch_results), 2)\n                self.assertIsInstance(batch_results[0], DetDataSample)\n\n    @parameterized.expand([\n        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),\n        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),\n        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),\n    ])\n    def test_single_stage_forward_tensor_mode(self, cfg_file, devices):\n        model = get_detector_cfg(cfg_file)\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        assert all([device in ['cpu', 'cuda'] for device in devices])\n\n        for device in devices:\n            detector = MODELS.build(model)\n\n            if device == 'cuda':\n                if not torch.cuda.is_available():\n                    return unittest.skip('test requires GPU and torch+cuda')\n                detector = detector.cuda()\n\n            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n            data = detector.data_preprocessor(packed_inputs, False)\n            batch_results = detector.forward(**data, mode='tensor')\n            self.assertIsInstance(batch_results, tuple)\n"
  },
  {
    "path": "tests/test_models/test_detectors/test_two_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import demo_mm_inputs, get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestTwoStageBBox(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand([\n        'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',\n        'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',\n    ])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.rpn_head)\n        self.assertTrue(detector.roi_head)\n\n        # if rpn.num_classes > 1, force set rpn.num_classes = 1\n        if hasattr(model.rpn_head, 'num_classes'):\n            model.rpn_head.num_classes = 2\n            detector = MODELS.build(model)\n            self.assertEqual(detector.rpn_head.num_classes, 1)\n\n    @parameterized.expand([\n        'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',\n        'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',\n    ])\n    def test_two_stage_forward_loss_mode(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n\n        if not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.cuda()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n\n        data = detector.data_preprocessor(packed_inputs, True)\n        # Test loss mode\n        losses = detector.forward(**data, mode='loss')\n        self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([\n        'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',\n        'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',\n    ])\n    def test_two_stage_forward_predict_mode(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n\n        if not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.cuda()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n        data = detector.data_preprocessor(packed_inputs, False)\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            with torch.no_grad():\n                batch_results = detector.forward(**data, mode='predict')\n            self.assertEqual(len(batch_results), 2)\n            self.assertIsInstance(batch_results[0], 
DetDataSample)\n\n    # TODO: Awaiting refactoring\n    # @parameterized.expand([\n    #     'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',\n    #     'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',\n    #     'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',\n    # ])\n    # def test_two_stage_forward_tensor_mode(self, cfg_file):\n    #     model = get_detector_cfg(cfg_file)\n    #     # backbone convert to ResNet18\n    #     model.backbone.depth = 18\n    #     model.neck.in_channels = [64, 128, 256, 512]\n    #     model.backbone.init_cfg = None\n    #\n    #     from mmdet.models import build_detector\n    #     detector = build_detector(model)\n    #\n    #     if not torch.cuda.is_available():\n    #         return unittest.skip('test requires GPU and torch+cuda')\n    #     detector = detector.cuda()\n    #\n    #     packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])\n\n    # data = detector.data_preprocessor(packed_inputs, False)\n\n    # out = detector.forward(**data, mode='tensor')\n    # self.assertIsInstance(out, tuple)\n\n\nclass TestTwoStageMask(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    @parameterized.expand([\n        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',\n        'queryinst/queryinst_r50_fpn_1x_coco.py'\n    ])\n    def test_init(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n        self.assertTrue(detector.backbone)\n        self.assertTrue(detector.neck)\n        self.assertTrue(detector.rpn_head)\n        self.assertTrue(detector.roi_head)\n        self.assertTrue(detector.roi_head.mask_head)\n\n        # if rpn.num_classes > 1, force set rpn.num_classes = 1\n        if hasattr(model.rpn_head, 'num_classes'):\n            model.rpn_head.num_classes = 2\n            detector = MODELS.build(model)\n            self.assertEqual(detector.rpn_head.num_classes, 1)\n\n    @parameterized.expand([\n        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',\n        'queryinst/queryinst_r50_fpn_1x_coco.py'\n    ])\n    def test_two_stage_forward_loss_mode(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n\n        if not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.cuda()\n\n        packed_inputs = demo_mm_inputs(\n            2, [[3, 128, 128], [3, 125, 130]], with_mask=True)\n        data = detector.data_preprocessor(packed_inputs, True)\n        # Test loss mode\n        losses = detector.forward(**data, mode='loss')\n        self.assertIsInstance(losses, dict)\n\n    @parameterized.expand([\n        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',\n        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',\n        'queryinst/queryinst_r50_fpn_1x_coco.py'\n    ])\n    def test_two_stage_forward_predict_mode(self, cfg_file):\n        model = get_detector_cfg(cfg_file)\n        # backbone convert to ResNet18\n        
model.backbone.depth = 18\n        model.neck.in_channels = [64, 128, 256, 512]\n        model.backbone.init_cfg = None\n\n        from mmdet.registry import MODELS\n        detector = MODELS.build(model)\n\n        if not torch.cuda.is_available():\n            return unittest.skip('test requires GPU and torch+cuda')\n        detector = detector.cuda()\n\n        packed_inputs = demo_mm_inputs(2, [[3, 256, 256], [3, 255, 260]])\n        data = detector.data_preprocessor(packed_inputs, False)\n        # Test forward test\n        detector.eval()\n        with torch.no_grad():\n            batch_results = detector.forward(**data, mode='predict')\n            self.assertEqual(len(batch_results), 2)\n            self.assertIsInstance(batch_results[0], DetDataSample)\n\n    # TODO: Awaiting refactoring\n    # @parameterized.expand([\n    #     'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',\n    #     'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',\n    #     'queryinst/queryinst_r50_fpn_1x_coco.py'\n    # ])\n    # def test_two_stage_forward_tensor_mode(self, cfg_file):\n    #     model = get_detector_cfg(cfg_file)\n    #     # backbone convert to ResNet18\n    #     model.backbone.depth = 18\n    #     model.neck.in_channels = [64, 128, 256, 512]\n    #     model.backbone.init_cfg = None\n    #\n    #     from mmdet.models import build_detector\n    #     detector = build_detector(model)\n    #\n    #     if not torch.cuda.is_available():\n    #         return unittest.skip('test requires GPU and torch+cuda')\n    #     detector = detector.cuda()\n    #\n    #     packed_inputs = demo_mm_inputs(\n    #         2, [[3, 128, 128], [3, 125, 130]], with_mask=True)\n    #     data = detector.data_preprocessor(packed_inputs, False)\n    #\n    #     # out = detector.forward(**data, mode='tensor')\n    #     # self.assertIsInstance(out, tuple)\n"
  },
  {
    "path": "tests/test_models/test_layers/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_models/test_layers/test_brick_wrappers.py",
    "content": "from unittest.mock import patch\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmdet.models.layers import AdaptiveAvgPool2d, adaptive_avg_pool2d\n\nif torch.__version__ != 'parrots':\n    torch_version = '1.7'\nelse:\n    torch_version = 'parrots'\n\n\n@patch('torch.__version__', torch_version)\ndef test_adaptive_avg_pool2d():\n    # Test the empty batch dimension\n    # Test the two input conditions\n    x_empty = torch.randn(0, 3, 4, 5)\n    # 1. tuple[int, int]\n    wrapper_out = adaptive_avg_pool2d(x_empty, (2, 2))\n    assert wrapper_out.shape == (0, 3, 2, 2)\n    # 2. int\n    wrapper_out = adaptive_avg_pool2d(x_empty, 2)\n    assert wrapper_out.shape == (0, 3, 2, 2)\n\n    # wrapper op with 3-dim input\n    x_normal = torch.randn(3, 3, 4, 5)\n    wrapper_out = adaptive_avg_pool2d(x_normal, (2, 2))\n    ref_out = F.adaptive_avg_pool2d(x_normal, (2, 2))\n    assert wrapper_out.shape == (3, 3, 2, 2)\n    assert torch.equal(wrapper_out, ref_out)\n\n    wrapper_out = adaptive_avg_pool2d(x_normal, 2)\n    ref_out = F.adaptive_avg_pool2d(x_normal, 2)\n    assert wrapper_out.shape == (3, 3, 2, 2)\n    assert torch.equal(wrapper_out, ref_out)\n\n\n@patch('torch.__version__', torch_version)\ndef test_AdaptiveAvgPool2d():\n    # Test the empty batch dimension\n    x_empty = torch.randn(0, 3, 4, 5)\n    # Test the four input conditions\n    # 1. tuple[int, int]\n    wrapper = AdaptiveAvgPool2d((2, 2))\n    wrapper_out = wrapper(x_empty)\n    assert wrapper_out.shape == (0, 3, 2, 2)\n\n    # 2. int\n    wrapper = AdaptiveAvgPool2d(2)\n    wrapper_out = wrapper(x_empty)\n    assert wrapper_out.shape == (0, 3, 2, 2)\n\n    # 3. tuple[None, int]\n    wrapper = AdaptiveAvgPool2d((None, 2))\n    wrapper_out = wrapper(x_empty)\n    assert wrapper_out.shape == (0, 3, 4, 2)\n\n    # 3. tuple[int, None]\n    wrapper = AdaptiveAvgPool2d((2, None))\n    wrapper_out = wrapper(x_empty)\n    assert wrapper_out.shape == (0, 3, 2, 5)\n\n    # Test the normal batch dimension\n    x_normal = torch.randn(3, 3, 4, 5)\n    wrapper = AdaptiveAvgPool2d((2, 2))\n    ref = nn.AdaptiveAvgPool2d((2, 2))\n    wrapper_out = wrapper(x_normal)\n    ref_out = ref(x_normal)\n    assert wrapper_out.shape == (3, 3, 2, 2)\n    assert torch.equal(wrapper_out, ref_out)\n\n    wrapper = AdaptiveAvgPool2d(2)\n    ref = nn.AdaptiveAvgPool2d(2)\n    wrapper_out = wrapper(x_normal)\n    ref_out = ref(x_normal)\n    assert wrapper_out.shape == (3, 3, 2, 2)\n    assert torch.equal(wrapper_out, ref_out)\n\n    wrapper = AdaptiveAvgPool2d((None, 2))\n    ref = nn.AdaptiveAvgPool2d((None, 2))\n    wrapper_out = wrapper(x_normal)\n    ref_out = ref(x_normal)\n    assert wrapper_out.shape == (3, 3, 4, 2)\n    assert torch.equal(wrapper_out, ref_out)\n\n    wrapper = AdaptiveAvgPool2d((2, None))\n    ref = nn.AdaptiveAvgPool2d((2, None))\n    wrapper_out = wrapper(x_normal)\n    ref_out = ref(x_normal)\n    assert wrapper_out.shape == (3, 3, 2, 5)\n    assert torch.equal(wrapper_out, ref_out)\n"
  },
  {
    "path": "tests/test_models/test_layers/test_conv_upsample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.layers import ConvUpsample\n\n\n@pytest.mark.parametrize('num_layers', [0, 1, 2])\ndef test_conv_upsample(num_layers):\n    num_upsample = num_layers if num_layers > 0 else 0\n    num_layers = num_layers if num_layers > 0 else 1\n    layer = ConvUpsample(\n        10,\n        5,\n        num_layers=num_layers,\n        num_upsample=num_upsample,\n        conv_cfg=None,\n        norm_cfg=None)\n\n    size = 5\n    x = torch.randn((1, 10, size, size))\n    size = size * pow(2, num_upsample)\n    x = layer(x)\n    assert x.shape[-2:] == (size, size)\n"
  },
  {
    "path": "tests/test_models/test_layers/test_ema.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport math\nfrom unittest import TestCase\n\nimport torch\nimport torch.nn as nn\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.layers import ExpMomentumEMA\n\n\nclass TestEMA(TestCase):\n\n    def test_exp_momentum_ema(self):\n        model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))\n        # Test invalid gamma\n        with self.assertRaisesRegex(AssertionError,\n                                    'gamma must be greater than 0'):\n            ExpMomentumEMA(model, gamma=-1)\n\n        # Test EMA\n        model = torch.nn.Sequential(\n            torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))\n        momentum = 0.1\n        gamma = 4\n\n        ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma)\n        averaged_params = [\n            torch.zeros_like(param) for param in model.parameters()\n        ]\n        n_updates = 10\n        for i in range(n_updates):\n            updated_averaged_params = []\n            for p, p_avg in zip(model.parameters(), averaged_params):\n                p.detach().add_(torch.randn_like(p))\n                if i == 0:\n                    updated_averaged_params.append(p.clone())\n                else:\n                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum\n                    updated_averaged_params.append(\n                        (p_avg * (1 - m) + p * m).clone())\n            ema_model.update_parameters(model)\n            averaged_params = updated_averaged_params\n\n        for p_target, p_ema in zip(averaged_params, ema_model.parameters()):\n            assert_allclose(p_target, p_ema)\n\n    def test_exp_momentum_ema_update_buffer(self):\n        model = nn.Sequential(\n            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),\n            nn.Linear(5, 10))\n        # Test invalid gamma\n        with self.assertRaisesRegex(AssertionError,\n                                    'gamma must be greater than 0'):\n            ExpMomentumEMA(model, gamma=-1)\n\n        # Test EMA with momentum annealing.\n        momentum = 0.1\n        gamma = 4\n\n        ema_model = ExpMomentumEMA(\n            model, gamma=gamma, momentum=momentum, update_buffers=True)\n        averaged_params = [\n            torch.zeros_like(param)\n            for param in itertools.chain(model.parameters(), model.buffers())\n            if param.size() != torch.Size([])\n        ]\n        n_updates = 10\n        for i in range(n_updates):\n            updated_averaged_params = []\n            params = [\n                param for param in itertools.chain(model.parameters(),\n                                                   model.buffers())\n                if param.size() != torch.Size([])\n            ]\n            for p, p_avg in zip(params, averaged_params):\n                p.detach().add_(torch.randn_like(p))\n                if i == 0:\n                    updated_averaged_params.append(p.clone())\n                else:\n                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum\n                    updated_averaged_params.append(\n                        (p_avg * (1 - m) + p * m).clone())\n            ema_model.update_parameters(model)\n            averaged_params = updated_averaged_params\n\n        ema_params = [\n            param for param in itertools.chain(ema_model.module.parameters(),\n                                               
ema_model.module.buffers())\n            if param.size() != torch.Size([])\n        ]\n        for p_target, p_ema in zip(averaged_params, ema_params):\n            assert_allclose(p_target, p_ema)\n"
  },
  {
    "path": "tests/test_models/test_layers/test_inverted_residual.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom mmcv.cnn import is_norm\nfrom torch.nn.modules import GroupNorm\n\nfrom mmdet.models.layers import InvertedResidual, SELayer\n\n\ndef test_inverted_residual():\n\n    with pytest.raises(AssertionError):\n        # stride must be in [1, 2]\n        InvertedResidual(16, 16, 32, stride=3)\n\n    with pytest.raises(AssertionError):\n        # se_cfg must be None or dict\n        InvertedResidual(16, 16, 32, se_cfg=list())\n\n    with pytest.raises(AssertionError):\n        # in_channeld and mid_channels must be the same if\n        # with_expand_conv is False\n        InvertedResidual(16, 16, 32, with_expand_conv=False)\n\n    # Test InvertedResidual forward, stride=1\n    block = InvertedResidual(16, 16, 32, stride=1)\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    assert getattr(block, 'se', None) is None\n    assert block.with_res_shortcut\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n\n    # Test InvertedResidual forward, stride=2\n    block = InvertedResidual(16, 16, 32, stride=2)\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    assert not block.with_res_shortcut\n    assert x_out.shape == torch.Size((1, 16, 28, 28))\n\n    # Test InvertedResidual forward with se layer\n    se_cfg = dict(channels=32)\n    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    assert isinstance(block.se, SELayer)\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n\n    # Test InvertedResidual forward, with_expand_conv=False\n    block = InvertedResidual(32, 16, 32, with_expand_conv=False)\n    x = torch.randn(1, 32, 56, 56)\n    x_out = block(x)\n    assert getattr(block, 'expand_conv', None) is None\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n\n    # Test InvertedResidual forward with GroupNorm\n    block = InvertedResidual(\n        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    for m in block.modules():\n        if is_norm(m):\n            assert isinstance(m, GroupNorm)\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n\n    # Test InvertedResidual forward with HSigmoid\n    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n\n    # Test InvertedResidual forward with checkpoint\n    block = InvertedResidual(16, 16, 32, with_cp=True)\n    x = torch.randn(1, 16, 56, 56)\n    x_out = block(x)\n    assert block.with_cp\n    assert x_out.shape == torch.Size((1, 16, 56, 56))\n"
  },
  {
    "path": "tests/test_models/test_layers/test_plugins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport pytest\nimport torch\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.models.layers import DropBlock\nfrom mmdet.registry import MODELS\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\ndef test_dropblock():\n    feat = torch.rand(1, 1, 11, 11)\n    drop_prob = 1.0\n    dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)\n    out_feat = dropblock(feat)\n    assert (out_feat == 0).all() and out_feat.shape == feat.shape\n    drop_prob = 0.5\n    dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)\n    out_feat = dropblock(feat)\n    assert out_feat.shape == feat.shape\n\n    # drop_prob must be (0,1]\n    with pytest.raises(AssertionError):\n        DropBlock(1.5, 3)\n\n    # block_size cannot be an even number\n    with pytest.raises(AssertionError):\n        DropBlock(0.5, 2)\n\n    # warmup_iters cannot be less than 0\n    with pytest.raises(AssertionError):\n        DropBlock(0.5, 3, -1)\n\n\nclass TestPixelDecoder(unittest.TestCase):\n\n    def test_forward(self):\n        base_channels = 64\n        pixel_decoder_cfg = ConfigDict(\n            dict(\n                type='PixelDecoder',\n                in_channels=[base_channels * 2**i for i in range(4)],\n                feat_channels=base_channels,\n                out_channels=base_channels,\n                norm_cfg=dict(type='GN', num_groups=32),\n                act_cfg=dict(type='ReLU')))\n        self = MODELS.build(pixel_decoder_cfg)\n        self.init_weights()\n        img_metas = [{}, {}]\n        feats = [\n            torch.rand(\n                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))\n            for i in range(4)\n        ]\n        mask_feature, memory = self(feats, img_metas)\n\n        assert (memory == feats[-1]).all()\n        assert mask_feature.shape == feats[0].shape\n\n\nclass TestTransformerEncoderPixelDecoder(unittest.TestCase):\n\n    def test_forward(self):\n        base_channels = 64\n        pixel_decoder_cfg = ConfigDict(\n            dict(\n                type='TransformerEncoderPixelDecoder',\n                in_channels=[base_channels * 2**i for i in range(4)],\n                feat_channels=base_channels,\n                out_channels=base_channels,\n                norm_cfg=dict(type='GN', num_groups=32),\n                act_cfg=dict(type='ReLU'),\n                encoder=dict(  # DetrTransformerEncoder\n                    num_layers=6,\n                    layer_cfg=dict(  # DetrTransformerEncoderLayer\n                        self_attn_cfg=dict(  # MultiheadAttention\n                            embed_dims=base_channels,\n                            num_heads=8,\n                            attn_drop=0.1,\n                            proj_drop=0.1,\n                            dropout_layer=None,\n                            batch_first=True),\n                        ffn_cfg=dict(\n                            embed_dims=base_channels,\n                            feedforward_channels=base_channels * 8,\n                            num_fcs=2,\n                            act_cfg=dict(type='ReLU', inplace=True),\n                            ffn_drop=0.1,\n                            dropout_layer=None,\n                            add_identity=True),\n                        norm_cfg=dict(type='LN'),\n                        init_cfg=None),\n                    init_cfg=None),\n                positional_encoding=dict(\n            
        num_feats=base_channels // 2, normalize=True)))\n        self = MODELS.build(pixel_decoder_cfg)\n        self.init_weights()\n        img_metas = [{\n            'batch_input_shape': (128, 160),\n            'img_shape': (120, 160),\n        }, {\n            'batch_input_shape': (128, 160),\n            'img_shape': (125, 160),\n        }]\n        feats = [\n            torch.rand(\n                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))\n            for i in range(4)\n        ]\n        mask_feature, memory = self(feats, img_metas)\n\n        assert memory.shape[-2:] == feats[-1].shape[-2:]\n        assert mask_feature.shape == feats[0].shape\n\n\nclass TestMSDeformAttnPixelDecoder(unittest.TestCase):\n\n    def test_forward(self):\n        base_channels = 64\n        pixel_decoder_cfg = ConfigDict(\n            dict(\n                type='MSDeformAttnPixelDecoder',\n                in_channels=[base_channels * 2**i for i in range(4)],\n                strides=[4, 8, 16, 32],\n                feat_channels=base_channels,\n                out_channels=base_channels,\n                num_outs=3,\n                norm_cfg=dict(type='GN', num_groups=32),\n                act_cfg=dict(type='ReLU'),\n                encoder=dict(  # DeformableDetrTransformerEncoder\n                    num_layers=6,\n                    layer_cfg=dict(  # DeformableDetrTransformerEncoderLayer\n                        self_attn_cfg=dict(  # MultiScaleDeformableAttention\n                            embed_dims=base_channels,\n                            num_heads=8,\n                            num_levels=3,\n                            num_points=4,\n                            im2col_step=64,\n                            dropout=0.0,\n                            batch_first=True,\n                            norm_cfg=None,\n                            init_cfg=None),\n                        ffn_cfg=dict(\n                            embed_dims=base_channels,\n                            feedforward_channels=base_channels * 4,\n                            num_fcs=2,\n                            ffn_drop=0.0,\n                            act_cfg=dict(type='ReLU', inplace=True))),\n                    init_cfg=None),\n                positional_encoding=dict(\n                    num_feats=base_channels // 2, normalize=True),\n                init_cfg=None))\n        self = MODELS.build(pixel_decoder_cfg)\n        self.init_weights()\n        feats = [\n            torch.rand(\n                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))\n            for i in range(4)\n        ]\n        mask_feature, multi_scale_features = self(feats)\n\n        assert mask_feature.shape == feats[0].shape\n        assert len(multi_scale_features) == 3\n        multi_scale_features = multi_scale_features[::-1]\n        for i in range(3):\n            assert multi_scale_features[i].shape[-2:] == feats[i +\n                                                               1].shape[-2:]\n"
  },
  {
    "path": "tests/test_models/test_layers/test_position_encoding.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.layers import (LearnedPositionalEncoding,\n                                 SinePositionalEncoding)\n\n\ndef test_sine_positional_encoding(num_feats=16, batch_size=2):\n    # test invalid type of scale\n    with pytest.raises(AssertionError):\n        module = SinePositionalEncoding(\n            num_feats, scale=(3., ), normalize=True)\n\n    module = SinePositionalEncoding(num_feats)\n    h, w = 10, 6\n    mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)\n    assert not module.normalize\n    out = module(mask)\n    assert out.shape == (batch_size, num_feats * 2, h, w)\n\n    # set normalize\n    module = SinePositionalEncoding(num_feats, normalize=True)\n    assert module.normalize\n    out = module(mask)\n    assert out.shape == (batch_size, num_feats * 2, h, w)\n\n\ndef test_learned_positional_encoding(num_feats=16,\n                                     row_num_embed=10,\n                                     col_num_embed=10,\n                                     batch_size=2):\n    module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)\n    assert module.row_embed.weight.shape == (row_num_embed, num_feats)\n    assert module.col_embed.weight.shape == (col_num_embed, num_feats)\n    h, w = 10, 6\n    mask = torch.rand(batch_size, h, w) > 0.5\n    out = module(mask)\n    assert out.shape == (batch_size, num_feats * 2, h, w)\n"
  },
  {
    "path": "tests/test_models/test_layers/test_se_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.model import constant_init\n\nfrom mmdet.models.layers import DyReLU, SELayer\n\n\ndef test_se_layer():\n    with pytest.raises(AssertionError):\n        # act_cfg sequence length must equal to 2\n        SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))\n\n    with pytest.raises(AssertionError):\n        # act_cfg sequence must be a tuple of dict\n        SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])\n\n    # Test SELayer forward\n    layer = SELayer(channels=32)\n    layer.init_weights()\n    layer.train()\n\n    x = torch.randn((1, 32, 10, 10))\n    x_out = layer(x)\n    assert x_out.shape == torch.Size((1, 32, 10, 10))\n\n\ndef test_dyrelu():\n    with pytest.raises(AssertionError):\n        # act_cfg sequence length must equal to 2\n        DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))\n\n    with pytest.raises(AssertionError):\n        # act_cfg sequence must be a tuple of dict\n        DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])\n\n    # Test DyReLU forward\n    layer = DyReLU(channels=32)\n    layer.init_weights()\n    layer.train()\n    x = torch.randn((1, 32, 10, 10))\n    x_out = layer(x)\n    assert x_out.shape == torch.Size((1, 32, 10, 10))\n\n    # DyReLU should act as standard (static) ReLU\n    # when eliminating the effect of SE-like module\n    layer = DyReLU(channels=32)\n    constant_init(layer.conv2.conv, 0)\n    layer.train()\n    x = torch.randn((1, 32, 10, 10))\n    x_out = layer(x)\n    relu_out = F.relu(x)\n    assert torch.equal(x_out, relu_out)\n"
  },
  {
    "path": "tests/test_models/test_layers/test_transformer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom mmengine.config import ConfigDict\n\nfrom mmdet.models.layers.transformer import (AdaptivePadding,\n                                             DetrTransformerDecoder,\n                                             DetrTransformerEncoder,\n                                             PatchEmbed, PatchMerging)\n\n\ndef test_adaptive_padding():\n\n    for padding in ('same', 'corner'):\n        kernel_size = 16\n        stride = 16\n        dilation = 1\n        input = torch.rand(1, 1, 15, 17)\n        pool = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        out = pool(input)\n        # padding to divisible by 16\n        assert (out.shape[2], out.shape[3]) == (16, 32)\n        input = torch.rand(1, 1, 16, 17)\n        out = pool(input)\n        # padding to divisible by 16\n        assert (out.shape[2], out.shape[3]) == (16, 32)\n\n        kernel_size = (2, 2)\n        stride = (2, 2)\n        dilation = (1, 1)\n\n        adap_pad = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        input = torch.rand(1, 1, 11, 13)\n        out = adap_pad(input)\n        # padding to divisible by 2\n        assert (out.shape[2], out.shape[3]) == (12, 14)\n\n        kernel_size = (2, 2)\n        stride = (10, 10)\n        dilation = (1, 1)\n\n        adap_pad = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        input = torch.rand(1, 1, 10, 13)\n        out = adap_pad(input)\n        #  no padding\n        assert (out.shape[2], out.shape[3]) == (10, 13)\n\n        kernel_size = (11, 11)\n        adap_pad = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        input = torch.rand(1, 1, 11, 13)\n        out = adap_pad(input)\n        #  all padding\n        assert (out.shape[2], out.shape[3]) == (21, 21)\n\n        # test padding as kernel is (7,9)\n        input = torch.rand(1, 1, 11, 13)\n        stride = (3, 4)\n        kernel_size = (4, 5)\n        dilation = (2, 2)\n        # actually (7, 9)\n        adap_pad = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        dilation_out = adap_pad(input)\n        assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)\n        kernel_size = (7, 9)\n        dilation = (1, 1)\n        adap_pad = AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=padding)\n        kernel79_out = adap_pad(input)\n        assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)\n        assert kernel79_out.shape == dilation_out.shape\n\n    # assert only support \"same\" \"corner\"\n    with pytest.raises(AssertionError):\n        AdaptivePadding(\n            kernel_size=kernel_size,\n            stride=stride,\n            dilation=dilation,\n            padding=1)\n\n\ndef test_patch_embed():\n    B = 2\n    H = 3\n    W = 4\n    C = 3\n    embed_dims = 10\n    kernel_size = 3\n    stride = 1\n    dummy_input = torch.rand(B, C, H, W)\n    patch_merge_1 = PatchEmbed(\n        
in_channels=C,\n        embed_dims=embed_dims,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=0,\n        dilation=1,\n        norm_cfg=None)\n\n    x1, shape = patch_merge_1(dummy_input)\n    # test out shape\n    assert x1.shape == (2, 2, 10)\n    # test outsize is correct\n    assert shape == (1, 2)\n    # test L = out_h * out_w\n    assert shape[0] * shape[1] == x1.shape[1]\n\n    B = 2\n    H = 10\n    W = 10\n    C = 3\n    embed_dims = 10\n    kernel_size = 5\n    stride = 2\n    dummy_input = torch.rand(B, C, H, W)\n    # test dilation\n    patch_merge_2 = PatchEmbed(\n        in_channels=C,\n        embed_dims=embed_dims,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=0,\n        dilation=2,\n        norm_cfg=None,\n    )\n\n    x2, shape = patch_merge_2(dummy_input)\n    # test out shape\n    assert x2.shape == (2, 1, 10)\n    # test outsize is correct\n    assert shape == (1, 1)\n    # test L = out_h * out_w\n    assert shape[0] * shape[1] == x2.shape[1]\n\n    stride = 2\n    input_size = (10, 10)\n\n    dummy_input = torch.rand(B, C, H, W)\n    # test stride and norm\n    patch_merge_3 = PatchEmbed(\n        in_channels=C,\n        embed_dims=embed_dims,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=0,\n        dilation=2,\n        norm_cfg=dict(type='LN'),\n        input_size=input_size)\n\n    x3, shape = patch_merge_3(dummy_input)\n    # test out shape\n    assert x3.shape == (2, 1, 10)\n    # test outsize is correct\n    assert shape == (1, 1)\n    # test L = out_h * out_w\n    assert shape[0] * shape[1] == x3.shape[1]\n\n    # test the init_out_size with nn.Unfold\n    assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -\n                                              1) // 2 + 1\n    assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -\n                                              1) // 2 + 1\n    H = 11\n    W = 12\n    input_size = (H, W)\n    dummy_input = torch.rand(B, C, H, W)\n    # test stride and norm\n    patch_merge_3 = PatchEmbed(\n        in_channels=C,\n        embed_dims=embed_dims,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=0,\n        dilation=2,\n        norm_cfg=dict(type='LN'),\n        input_size=input_size)\n\n    _, shape = patch_merge_3(dummy_input)\n    # when input_size equal to real input\n    # the out_size should be equal to `init_out_size`\n    assert shape == patch_merge_3.init_out_size\n\n    input_size = (H, W)\n    dummy_input = torch.rand(B, C, H, W)\n    # test stride and norm\n    patch_merge_3 = PatchEmbed(\n        in_channels=C,\n        embed_dims=embed_dims,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=0,\n        dilation=2,\n        norm_cfg=dict(type='LN'),\n        input_size=input_size)\n\n    _, shape = patch_merge_3(dummy_input)\n    # when input_size equal to real input\n    # the out_size should be equal to `init_out_size`\n    assert shape == patch_merge_3.init_out_size\n\n    # test adap padding\n    for padding in ('same', 'corner'):\n        in_c = 2\n        embed_dims = 3\n        B = 2\n\n        # test stride is 1\n        input_size = (5, 5)\n        kernel_size = (5, 5)\n        stride = (1, 1)\n        dilation = 1\n        bias = False\n\n        x = torch.rand(B, in_c, *input_size)\n        patch_embed = PatchEmbed(\n            in_channels=in_c,\n            embed_dims=embed_dims,\n            kernel_size=kernel_size,\n            
stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_embed(x)\n        assert x_out.size() == (B, 25, 3)\n        assert out_size == (5, 5)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test kernel_size == stride\n        input_size = (5, 5)\n        kernel_size = (5, 5)\n        stride = (5, 5)\n        dilation = 1\n        bias = False\n\n        x = torch.rand(B, in_c, *input_size)\n        patch_embed = PatchEmbed(\n            in_channels=in_c,\n            embed_dims=embed_dims,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_embed(x)\n        assert x_out.size() == (B, 1, 3)\n        assert out_size == (1, 1)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test kernel_size == stride\n        input_size = (6, 5)\n        kernel_size = (5, 5)\n        stride = (5, 5)\n        dilation = 1\n        bias = False\n\n        x = torch.rand(B, in_c, *input_size)\n        patch_embed = PatchEmbed(\n            in_channels=in_c,\n            embed_dims=embed_dims,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_embed(x)\n        assert x_out.size() == (B, 2, 3)\n        assert out_size == (2, 1)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test different kernel_size with different stride\n        input_size = (6, 5)\n        kernel_size = (6, 2)\n        stride = (6, 2)\n        dilation = 1\n        bias = False\n\n        x = torch.rand(B, in_c, *input_size)\n        patch_embed = PatchEmbed(\n            in_channels=in_c,\n            embed_dims=embed_dims,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_embed(x)\n        assert x_out.size() == (B, 3, 3)\n        assert out_size == (1, 3)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n\ndef test_patch_merging():\n\n    # Test the model with int padding\n    in_c = 3\n    out_c = 4\n    kernel_size = 3\n    stride = 3\n    padding = 1\n    dilation = 1\n    bias = False\n    # test the case `pad_to_stride` is False\n    patch_merge = PatchMerging(\n        in_channels=in_c,\n        out_channels=out_c,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=padding,\n        dilation=dilation,\n        bias=bias)\n    B, L, C = 1, 100, 3\n    input_size = (10, 10)\n    x = torch.rand(B, L, C)\n    x_out, out_size = patch_merge(x, input_size)\n    assert x_out.size() == (1, 16, 4)\n    assert out_size == (4, 4)\n    # assert out size is consistent with real output\n    assert x_out.size(1) == out_size[0] * out_size[1]\n    in_c = 4\n    out_c = 5\n    kernel_size = 6\n    stride = 3\n    padding = 2\n    dilation = 2\n    bias = False\n    patch_merge = PatchMerging(\n        in_channels=in_c,\n        out_channels=out_c,\n        kernel_size=kernel_size,\n        stride=stride,\n        padding=padding,\n        dilation=dilation,\n        bias=bias)\n    B, L, C = 1, 100, 4\n    input_size = (10, 10)\n    x = torch.rand(B, L, C)\n    x_out, out_size = patch_merge(x, input_size)\n    assert x_out.size() == (1, 4, 5)\n    assert out_size == 
(2, 2)\n    # assert out size is consistent with real output\n    assert x_out.size(1) == out_size[0] * out_size[1]\n\n    # Test with adaptive padding\n    for padding in ('same', 'corner'):\n        in_c = 2\n        out_c = 3\n        B = 2\n\n        # test stride is 1\n        input_size = (5, 5)\n        kernel_size = (5, 5)\n        stride = (1, 1)\n        dilation = 1\n        bias = False\n        L = input_size[0] * input_size[1]\n\n        x = torch.rand(B, L, in_c)\n        patch_merge = PatchMerging(\n            in_channels=in_c,\n            out_channels=out_c,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_merge(x, input_size)\n        assert x_out.size() == (B, 25, 3)\n        assert out_size == (5, 5)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test kernel_size == stride\n        input_size = (5, 5)\n        kernel_size = (5, 5)\n        stride = (5, 5)\n        dilation = 1\n        bias = False\n        L = input_size[0] * input_size[1]\n\n        x = torch.rand(B, L, in_c)\n        patch_merge = PatchMerging(\n            in_channels=in_c,\n            out_channels=out_c,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_merge(x, input_size)\n        assert x_out.size() == (B, 1, 3)\n        assert out_size == (1, 1)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test kernel_size == stride\n        input_size = (6, 5)\n        kernel_size = (5, 5)\n        stride = (5, 5)\n        dilation = 1\n        bias = False\n        L = input_size[0] * input_size[1]\n\n        x = torch.rand(B, L, in_c)\n        patch_merge = PatchMerging(\n            in_channels=in_c,\n            out_channels=out_c,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_merge(x, input_size)\n        assert x_out.size() == (B, 2, 3)\n        assert out_size == (2, 1)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n        # test different kernel_size with different stride\n        input_size = (6, 5)\n        kernel_size = (6, 2)\n        stride = (6, 2)\n        dilation = 1\n        bias = False\n        L = input_size[0] * input_size[1]\n\n        x = torch.rand(B, L, in_c)\n        patch_merge = PatchMerging(\n            in_channels=in_c,\n            out_channels=out_c,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        x_out, out_size = patch_merge(x, input_size)\n        assert x_out.size() == (B, 3, 3)\n        assert out_size == (1, 3)\n        assert x_out.size(1) == out_size[0] * out_size[1]\n\n\ndef test_detr_transformer_encoder_decoder():\n    config = ConfigDict(\n        num_layers=6,\n        layer_cfg=dict(  # DetrTransformerDecoderLayer\n            self_attn_cfg=dict(  # MultiheadAttention\n                embed_dims=256,\n                num_heads=8,\n                dropout=0.1),\n            cross_attn_cfg=dict(  # MultiheadAttention\n                embed_dims=256,\n                num_heads=8,\n                dropout=0.1),\n            ffn_cfg=dict(\n                embed_dims=256,\n       
         feedforward_channels=2048,\n                num_fcs=2,\n                ffn_drop=0.1,\n                act_cfg=dict(type='ReLU', inplace=True))))\n    assert len(DetrTransformerDecoder(**config).layers) == 6\n    assert DetrTransformerDecoder(**config)\n\n    config = ConfigDict(\n        dict(\n            num_layers=6,\n            layer_cfg=dict(  # DetrTransformerEncoderLayer\n                self_attn_cfg=dict(  # MultiheadAttention\n                    embed_dims=256,\n                    num_heads=8,\n                    dropout=0.1),\n                ffn_cfg=dict(\n                    embed_dims=256,\n                    feedforward_channels=2048,\n                    num_fcs=2,\n                    ffn_drop=0.1,\n                    act_cfg=dict(type='ReLU', inplace=True)))))\n    assert len(DetrTransformerEncoder(**config).layers) == 6\n    assert DetrTransformerEncoder(**config)\n"
  },
  {
    "path": "tests/test_models/test_losses/test_gaussian_focal_loss.py",
    "content": "import unittest\n\nimport torch\n\nfrom mmdet.models.losses import GaussianFocalLoss\n\n\nclass TestGaussianFocalLoss(unittest.TestCase):\n\n    def test_forward(self):\n        pred = torch.rand((10, 4))\n        target = torch.rand((10, 4))\n        gaussian_focal_loss = GaussianFocalLoss()\n        loss1 = gaussian_focal_loss(pred, target)\n        self.assertIsInstance(loss1, torch.Tensor)\n\n        loss2 = gaussian_focal_loss(pred, target, avg_factor=0.5)\n        self.assertIsInstance(loss2, torch.Tensor)\n\n        # test reduction\n        gaussian_focal_loss = GaussianFocalLoss(reduction='none')\n        loss = gaussian_focal_loss(pred, target)\n        self.assertTrue(loss.shape == (10, 4))\n\n        # test reduction_override\n        loss = gaussian_focal_loss(pred, target, reduction_override='mean')\n        self.assertTrue(loss.ndim == 0)\n\n        # Only supports None, 'none', 'mean', 'sum'\n        with self.assertRaises(AssertionError):\n            gaussian_focal_loss(pred, target, reduction_override='max')\n\n        # test pos_inds\n        pos_inds = (torch.rand(5) * 8).long()\n        pos_labels = (torch.rand(5) * 2).long()\n        gaussian_focal_loss = GaussianFocalLoss()\n        loss = gaussian_focal_loss(pred, target, pos_inds, pos_labels)\n        self.assertIsInstance(loss, torch.Tensor)\n"
  },
  {
    "path": "tests/test_models/test_losses/test_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom mmengine.utils import digit_version\n\nfrom mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,\n                                 DistributionFocalLoss, FocalLoss,\n                                 GaussianFocalLoss,\n                                 KnowledgeDistillationKLDivLoss, L1Loss,\n                                 MSELoss, QualityFocalLoss, SeesawLoss,\n                                 SmoothL1Loss, VarifocalLoss)\nfrom mmdet.models.losses.ghm_loss import GHMC, GHMR\nfrom mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,\n                                          EIoULoss, GIoULoss, IoULoss)\n\n\n@pytest.mark.parametrize(\n    'loss_class',\n    [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss])\ndef test_iou_type_loss_zeros_weight(loss_class):\n    pred = torch.rand((10, 4))\n    target = torch.rand((10, 4))\n    weight = torch.zeros(10)\n\n    loss = loss_class()(pred, target, weight)\n    assert loss == 0.\n\n\n@pytest.mark.parametrize('loss_class', [\n    BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,\n    EIoULoss, FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss,\n    GaussianFocalLoss, GIoULoss, QualityFocalLoss, IoULoss, L1Loss,\n    VarifocalLoss, GHMR, GHMC, SmoothL1Loss, KnowledgeDistillationKLDivLoss,\n    DiceLoss\n])\ndef test_loss_with_reduction_override(loss_class):\n    pred = torch.rand((10, 4))\n    target = torch.rand((10, 4)),\n    weight = None\n\n    with pytest.raises(AssertionError):\n        # only reduction_override from [None, 'none', 'mean', 'sum']\n        # is not allowed\n        reduction_override = True\n        loss_class()(\n            pred, target, weight, reduction_override=reduction_override)\n\n\n@pytest.mark.parametrize('loss_class', [QualityFocalLoss])\n@pytest.mark.parametrize('activated', [False, True])\ndef test_QualityFocalLoss_Loss(loss_class, activated):\n    input_shape = (4, 5)\n    pred = torch.rand(input_shape)\n    label = torch.Tensor([0, 1, 2, 0]).long()\n    quality_label = torch.rand(input_shape[0])\n\n    original_loss = loss_class(activated=activated)(pred,\n                                                    (label, quality_label))\n    assert isinstance(original_loss, torch.Tensor)\n\n    target = torch.nn.functional.one_hot(label, 5)\n    target = target * quality_label.reshape(input_shape[0], 1)\n\n    new_loss = loss_class(activated=activated)(pred, target)\n    assert isinstance(new_loss, torch.Tensor)\n    assert new_loss == original_loss\n\n\n@pytest.mark.parametrize('loss_class', [\n    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss, MSELoss,\n    L1Loss, SmoothL1Loss, BalancedL1Loss\n])\n@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])\ndef test_regression_losses(loss_class, input_shape):\n    pred = torch.rand(input_shape)\n    target = torch.rand(input_shape)\n    weight = torch.rand(input_shape)\n\n    # Test loss forward\n    loss = loss_class()(pred, target)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with weight\n    loss = loss_class()(pred, target, weight)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with reduction_override\n    loss = loss_class()(pred, target, reduction_override='mean')\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with avg_factor\n    loss = loss_class()(pred, target, 
avg_factor=10)\n    assert isinstance(loss, torch.Tensor)\n\n    with pytest.raises(ValueError):\n        # loss can evaluate with avg_factor only if\n        # reduction is None, 'none' or 'mean'.\n        reduction_override = 'sum'\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n\n    # Test loss forward with avg_factor and reduction\n    for reduction_override in [None, 'none', 'mean']:\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n        assert isinstance(loss, torch.Tensor)\n\n\n@pytest.mark.parametrize('loss_class', [CrossEntropyLoss])\n@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])\ndef test_classification_losses(loss_class, input_shape):\n    if input_shape[0] == 0 and digit_version(\n            torch.__version__) < digit_version('1.5.0'):\n        pytest.skip(\n            f'CELoss in PyTorch {torch.__version__} does not support empty'\n            f'tensor.')\n\n    pred = torch.rand(input_shape)\n    target = torch.randint(0, 5, (input_shape[0], ))\n\n    # Test loss forward\n    loss = loss_class()(pred, target)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with reduction_override\n    loss = loss_class()(pred, target, reduction_override='mean')\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with avg_factor\n    loss = loss_class()(pred, target, avg_factor=10)\n    assert isinstance(loss, torch.Tensor)\n\n    with pytest.raises(ValueError):\n        # loss can evaluate with avg_factor only if\n        # reduction is None, 'none' or 'mean'.\n        reduction_override = 'sum'\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n\n    # Test loss forward with avg_factor and reduction\n    for reduction_override in [None, 'none', 'mean']:\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n        assert isinstance(loss, torch.Tensor)\n\n\n@pytest.mark.parametrize('loss_class', [FocalLoss])\n@pytest.mark.parametrize('input_shape', [(10, 5), (3, 5, 40, 40)])\ndef test_FocalLoss_loss(loss_class, input_shape):\n    pred = torch.rand(input_shape)\n    target = torch.randint(0, 5, (input_shape[0], ))\n    if len(input_shape) == 4:\n        B, N, W, H = input_shape\n        target = F.one_hot(torch.randint(0, 5, (B * W * H, )),\n                           5).reshape(B, W, H, N).permute(0, 3, 1, 2)\n\n    # Test loss forward\n    loss = loss_class()(pred, target)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with reduction_override\n    loss = loss_class()(pred, target, reduction_override='mean')\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with avg_factor\n    loss = loss_class()(pred, target, avg_factor=10)\n    assert isinstance(loss, torch.Tensor)\n\n    with pytest.raises(ValueError):\n        # loss can evaluate with avg_factor only if\n        # reduction is None, 'none' or 'mean'.\n        reduction_override = 'sum'\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n\n    # Test loss forward with avg_factor and reduction\n    for reduction_override in [None, 'none', 'mean']:\n        loss_class()(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n        assert isinstance(loss, torch.Tensor)\n\n\n@pytest.mark.parametrize('loss_class', 
[GHMR])\n@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])\ndef test_GHMR_loss(loss_class, input_shape):\n    pred = torch.rand(input_shape)\n    target = torch.rand(input_shape)\n    weight = torch.rand(input_shape)\n\n    # Test loss forward\n    loss = loss_class()(pred, target, weight)\n    assert isinstance(loss, torch.Tensor)\n\n\n@pytest.mark.parametrize('use_sigmoid', [True, False])\n@pytest.mark.parametrize('reduction', ['sum', 'mean', None])\n@pytest.mark.parametrize('avg_non_ignore', [True, False])\ndef test_loss_with_ignore_index(use_sigmoid, reduction, avg_non_ignore):\n    # Test cross_entropy loss\n    loss_class = CrossEntropyLoss(\n        use_sigmoid=use_sigmoid,\n        use_mask=False,\n        ignore_index=255,\n        avg_non_ignore=avg_non_ignore)\n    pred = torch.rand((10, 5))\n    target = torch.randint(0, 5, (10, ))\n\n    ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long)\n    target[ignored_indices] = 255\n\n    # Test loss forward with default ignore\n    loss_with_ignore = loss_class(pred, target, reduction_override=reduction)\n    assert isinstance(loss_with_ignore, torch.Tensor)\n\n    # Test loss forward with forward ignore\n    target[ignored_indices] = 255\n    loss_with_forward_ignore = loss_class(\n        pred, target, ignore_index=255, reduction_override=reduction)\n    assert isinstance(loss_with_forward_ignore, torch.Tensor)\n\n    # Verify correctness\n    if avg_non_ignore:\n        # manually remove the ignored elements\n        not_ignored_indices = (target != 255)\n        pred = pred[not_ignored_indices]\n        target = target[not_ignored_indices]\n    loss = loss_class(pred, target, reduction_override=reduction)\n\n    assert torch.allclose(loss, loss_with_ignore)\n    assert torch.allclose(loss, loss_with_forward_ignore)\n\n    # test ignore all target\n    pred = torch.rand((10, 5))\n    target = torch.ones((10, ), dtype=torch.long) * 255\n    loss = loss_class(pred, target, reduction_override=reduction)\n    assert loss == 0\n\n\n@pytest.mark.parametrize('naive_dice', [True, False])\ndef test_dice_loss(naive_dice):\n    loss_class = DiceLoss\n    pred = torch.rand((10, 4, 4))\n    target = torch.rand((10, 4, 4))\n    weight = torch.rand((10))\n\n    # Test loss forward\n    loss = loss_class(naive_dice=naive_dice)(pred, target)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with weight\n    loss = loss_class(naive_dice=naive_dice)(pred, target, weight)\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with reduction_override\n    loss = loss_class(naive_dice=naive_dice)(\n        pred, target, reduction_override='mean')\n    assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with avg_factor\n    loss = loss_class(naive_dice=naive_dice)(pred, target, avg_factor=10)\n    assert isinstance(loss, torch.Tensor)\n\n    with pytest.raises(ValueError):\n        # loss can evaluate with avg_factor only if\n        # reduction is None, 'none' or 'mean'.\n        reduction_override = 'sum'\n        loss_class(naive_dice=naive_dice)(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n\n    # Test loss forward with avg_factor and reduction\n    for reduction_override in [None, 'none', 'mean']:\n        loss_class(naive_dice=naive_dice)(\n            pred, target, avg_factor=10, reduction_override=reduction_override)\n        assert isinstance(loss, torch.Tensor)\n\n    # Test loss forward with has_acted=False and use_sigmoid=False\n    with 
pytest.raises(NotImplementedError):\n        loss_class(\n            use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred,\n                                                                     target)\n\n    # Test loss forward with weight.ndim != loss.ndim\n    with pytest.raises(AssertionError):\n        weight = torch.rand((2, 8))\n        loss_class(naive_dice=naive_dice)(pred, target, weight)\n\n    # Test loss forward with len(weight) != len(pred)\n    with pytest.raises(AssertionError):\n        weight = torch.rand((8))\n        loss_class(naive_dice=naive_dice)(pred, target, weight)\n"
  },
  {
    "path": "tests/test_models/test_necks/test_ct_resnet_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport torch\n\nfrom mmdet.models.necks import CTResNetNeck\n\n\nclass TestCTResNetNeck(unittest.TestCase):\n\n    def test_init(self):\n        # num_filters/num_kernels must be same length\n        with self.assertRaises(AssertionError):\n            CTResNetNeck(\n                in_channels=10,\n                num_deconv_filters=(10, 10),\n                num_deconv_kernels=(4, ))\n\n        ct_resnet_neck = CTResNetNeck(\n            in_channels=16,\n            num_deconv_filters=(8, 8),\n            num_deconv_kernels=(4, 4),\n            use_dcn=False)\n        ct_resnet_neck.init_weights()\n\n    def test_forward(self):\n        in_channels = 16\n        num_filters = (8, 8)\n        num_kernels = (4, 4)\n        feat = torch.rand(1, 16, 4, 4)\n        ct_resnet_neck = CTResNetNeck(\n            in_channels=in_channels,\n            num_deconv_filters=num_filters,\n            num_deconv_kernels=num_kernels,\n            use_dcn=False)\n\n        # feat must be list or tuple\n        with self.assertRaises(AssertionError):\n            ct_resnet_neck(feat)\n\n        out_feat = ct_resnet_neck([feat])[0]\n        self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))\n\n        if torch.cuda.is_available():\n            # test dcn\n            ct_resnet_neck = CTResNetNeck(\n                in_channels=in_channels,\n                num_deconv_filters=num_filters,\n                num_deconv_kernels=num_kernels)\n            ct_resnet_neck = ct_resnet_neck.cuda()\n            feat = feat.cuda()\n            out_feat = ct_resnet_neck([feat])[0]\n            self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))\n"
  },
  {
    "path": "tests/test_models/test_necks/test_necks.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.models.necks import (FPG, FPN, FPN_CARAFE, NASFCOS_FPN, NASFPN, SSH,\n                                YOLOXPAFPN, ChannelMapper, DilatedEncoder,\n                                DyHead, SSDNeck, YOLOV3Neck)\n\n\ndef test_fpn():\n    \"\"\"Tests fpn.\"\"\"\n    s = 64\n    in_channels = [8, 16, 32, 64]\n    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]\n    out_channels = 8\n\n    # end_level=-1 is equal to end_level=3\n    FPN(in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=0,\n        end_level=-1,\n        num_outs=5)\n    FPN(in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=0,\n        end_level=3,\n        num_outs=5)\n\n    # `num_outs` is not equal to end_level - start_level + 1\n    with pytest.raises(AssertionError):\n        FPN(in_channels=in_channels,\n            out_channels=out_channels,\n            start_level=1,\n            end_level=2,\n            num_outs=3)\n\n    # `num_outs` is not equal to len(in_channels) - start_level\n    with pytest.raises(AssertionError):\n        FPN(in_channels=in_channels,\n            out_channels=out_channels,\n            start_level=1,\n            num_outs=2)\n\n    # `end_level` is larger than len(in_channels) - 1\n    with pytest.raises(AssertionError):\n        FPN(in_channels=in_channels,\n            out_channels=out_channels,\n            start_level=1,\n            end_level=4,\n            num_outs=2)\n\n    # `num_outs` is not equal to end_level - start_level\n    with pytest.raises(AssertionError):\n        FPN(in_channels=in_channels,\n            out_channels=out_channels,\n            start_level=1,\n            end_level=3,\n            num_outs=1)\n\n    # Invalid `add_extra_convs` option\n    with pytest.raises(AssertionError):\n        FPN(in_channels=in_channels,\n            out_channels=out_channels,\n            start_level=1,\n            add_extra_convs='on_xxx',\n            num_outs=5)\n\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=1,\n        add_extra_convs=True,\n        num_outs=5)\n\n    # FPN expects a multiple levels of features per image\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels))\n    ]\n    outs = fpn_model(feats)\n    assert fpn_model.add_extra_convs == 'on_input'\n    assert len(outs) == fpn_model.num_outs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Tests for fpn with no extra convs (pooling is used instead)\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=1,\n        add_extra_convs=False,\n        num_outs=5)\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    assert not fpn_model.add_extra_convs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Tests for fpn with lateral bns\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=1,\n        add_extra_convs=True,\n        no_norm_on_lateral=False,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        
num_outs=5)\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    assert fpn_model.add_extra_convs == 'on_input'\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n    bn_exist = False\n    for m in fpn_model.modules():\n        if isinstance(m, _BatchNorm):\n            bn_exist = True\n    assert bn_exist\n\n    # Bilinear upsample\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=1,\n        add_extra_convs=True,\n        upsample_cfg=dict(mode='bilinear', align_corners=True),\n        num_outs=5)\n    fpn_model(feats)\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    assert fpn_model.add_extra_convs == 'on_input'\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Scale factor instead of fixed upsample size upsample\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        start_level=1,\n        add_extra_convs=True,\n        upsample_cfg=dict(scale_factor=2),\n        num_outs=5)\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Extra convs source is 'inputs'\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        add_extra_convs='on_input',\n        start_level=1,\n        num_outs=5)\n    assert fpn_model.add_extra_convs == 'on_input'\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Extra convs source is 'laterals'\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        add_extra_convs='on_lateral',\n        start_level=1,\n        num_outs=5)\n    assert fpn_model.add_extra_convs == 'on_lateral'\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # Extra convs source is 'outputs'\n    fpn_model = FPN(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        add_extra_convs='on_output',\n        start_level=1,\n        num_outs=5)\n    assert fpn_model.add_extra_convs == 'on_output'\n    outs = fpn_model(feats)\n    assert len(outs) == fpn_model.num_outs\n    for i in range(fpn_model.num_outs):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n\ndef test_channel_mapper():\n    \"\"\"Tests ChannelMapper.\"\"\"\n    s = 64\n    in_channels = [8, 16, 32, 64]\n    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]\n    out_channels = 8\n    kernel_size = 3\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels))\n    ]\n\n    # in_channels must be a list\n    with pytest.raises(AssertionError):\n        channel_mapper = ChannelMapper(\n            in_channels=10, out_channels=out_channels, kernel_size=kernel_size)\n    # the length of channel_mapper's inputs must be equal to the 
length of\n    # in_channels\n    with pytest.raises(AssertionError):\n        channel_mapper = ChannelMapper(\n            in_channels=in_channels[:-1],\n            out_channels=out_channels,\n            kernel_size=kernel_size)\n        channel_mapper(feats)\n\n    channel_mapper = ChannelMapper(\n        in_channels=in_channels,\n        out_channels=out_channels,\n        kernel_size=kernel_size)\n\n    outs = channel_mapper(feats)\n    assert len(outs) == len(feats)\n    for i in range(len(feats)):\n        outs[i].shape[1] == out_channels\n        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n\ndef test_dilated_encoder():\n    in_channels = 16\n    out_channels = 32\n    out_shape = 34\n    dilated_encoder = DilatedEncoder(in_channels, out_channels, 16, 2,\n                                     [2, 4, 6, 8])\n    feat = [torch.rand(1, in_channels, 34, 34)]\n    out_feat = dilated_encoder(feat)[0]\n    assert out_feat.shape == (1, out_channels, out_shape, out_shape)\n\n\ndef test_yolov3_neck():\n    # num_scales, in_channels, out_channels must be same length\n    with pytest.raises(AssertionError):\n        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])\n\n    # len(feats) must equal to num_scales\n    with pytest.raises(AssertionError):\n        neck = YOLOV3Neck(\n            num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])\n        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))\n        neck(feats)\n\n    # test normal channels\n    s = 32\n    in_channels = [16, 8, 4]\n    out_channels = [8, 4, 2]\n    feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels) - 1, -1, -1)\n    ]\n    neck = YOLOV3Neck(\n        num_scales=3, in_channels=in_channels, out_channels=out_channels)\n    outs = neck(feats)\n\n    assert len(outs) == len(feats)\n    for i in range(len(outs)):\n        assert outs[i].shape == \\\n               (1, out_channels[i], feat_sizes[i], feat_sizes[i])\n\n    # test more flexible setting\n    s = 32\n    in_channels = [32, 8, 16]\n    out_channels = [19, 21, 5]\n    feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels) - 1, -1, -1)\n    ]\n    neck = YOLOV3Neck(\n        num_scales=3, in_channels=in_channels, out_channels=out_channels)\n    outs = neck(feats)\n\n    assert len(outs) == len(feats)\n    for i in range(len(outs)):\n        assert outs[i].shape == \\\n               (1, out_channels[i], feat_sizes[i], feat_sizes[i])\n\n\ndef test_ssd_neck():\n    # level_strides/level_paddings must be same length\n    with pytest.raises(AssertionError):\n        SSDNeck(\n            in_channels=[8, 16],\n            out_channels=[8, 16, 32],\n            level_strides=[2],\n            level_paddings=[2, 1])\n\n    # length of out_channels must larger than in_channels\n    with pytest.raises(AssertionError):\n        SSDNeck(\n            in_channels=[8, 16],\n            out_channels=[8],\n            level_strides=[2],\n            level_paddings=[2])\n\n    # len(out_channels) - len(in_channels) must equal to len(level_strides)\n    with pytest.raises(AssertionError):\n        SSDNeck(\n            in_channels=[8, 16],\n            out_channels=[4, 16, 64],\n            level_strides=[2, 2],\n            level_paddings=[2, 2])\n\n   
 # in_channels must be same with out_channels[:len(in_channels)]\n    with pytest.raises(AssertionError):\n        SSDNeck(\n            in_channels=[8, 16],\n            out_channels=[4, 16, 64],\n            level_strides=[2],\n            level_paddings=[2])\n\n    ssd_neck = SSDNeck(\n        in_channels=[4],\n        out_channels=[4, 8, 16],\n        level_strides=[2, 1],\n        level_paddings=[1, 0])\n    feats = (torch.rand(1, 4, 16, 16), )\n    outs = ssd_neck(feats)\n    assert outs[0].shape == (1, 4, 16, 16)\n    assert outs[1].shape == (1, 8, 8, 8)\n    assert outs[2].shape == (1, 16, 6, 6)\n\n    # test SSD-Lite Neck\n    ssd_neck = SSDNeck(\n        in_channels=[4, 8],\n        out_channels=[4, 8, 16],\n        level_strides=[1],\n        level_paddings=[1],\n        l2_norm_scale=None,\n        use_depthwise=True,\n        norm_cfg=dict(type='BN'),\n        act_cfg=dict(type='ReLU6'))\n    assert not hasattr(ssd_neck, 'l2_norm')\n\n    from mmcv.cnn.bricks import DepthwiseSeparableConvModule\n    assert isinstance(ssd_neck.extra_layers[0][-1],\n                      DepthwiseSeparableConvModule)\n\n    feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))\n    outs = ssd_neck(feats)\n    assert outs[0].shape == (1, 4, 8, 8)\n    assert outs[1].shape == (1, 8, 8, 8)\n    assert outs[2].shape == (1, 16, 8, 8)\n\n\ndef test_yolox_pafpn():\n    s = 64\n    in_channels = [8, 16, 32, 64]\n    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]\n    out_channels = 24\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels))\n    ]\n    neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels)\n    outs = neck(feats)\n    assert len(outs) == len(feats)\n    for i in range(len(feats)):\n        assert outs[i].shape[1] == out_channels\n        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    # test depth-wise\n    neck = YOLOXPAFPN(\n        in_channels=in_channels, out_channels=out_channels, use_depthwise=True)\n\n    from mmcv.cnn.bricks import DepthwiseSeparableConvModule\n    assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule)\n\n    outs = neck(feats)\n    assert len(outs) == len(feats)\n    for i in range(len(feats)):\n        assert outs[i].shape[1] == out_channels\n        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n\ndef test_dyhead():\n    s = 64\n    in_channels = 8\n    out_channels = 16\n    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]\n    feats = [\n        torch.rand(1, in_channels, feat_sizes[i], feat_sizes[i])\n        for i in range(len(feat_sizes))\n    ]\n    neck = DyHead(\n        in_channels=in_channels, out_channels=out_channels, num_blocks=3)\n    outs = neck(feats)\n    assert len(outs) == len(feats)\n    for i in range(len(outs)):\n        assert outs[i].shape[1] == out_channels\n        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)\n\n    feat = torch.rand(1, 8, 4, 4)\n    # input feat must be tuple or list\n    with pytest.raises(AssertionError):\n        neck(feat)\n\n\ndef test_fpg():\n    # end_level=-1 is equal to end_level=3\n    norm_cfg = dict(type='BN', requires_grad=True)\n    FPG(in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        inter_channels=8,\n        num_outs=5,\n        add_extra_convs=True,\n        start_level=1,\n        end_level=-1,\n        stack_times=9,\n        paths=['bu'] * 9,\n        same_down_trans=None,\n        same_up_trans=dict(\n       
     type='conv',\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        across_lateral_trans=dict(\n            type='conv',\n            kernel_size=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        across_down_trans=dict(\n            type='interpolation_conv',\n            mode='nearest',\n            kernel_size=3,\n            norm_cfg=norm_cfg,\n            order=('act', 'conv', 'norm'),\n            inplace=False),\n        across_up_trans=None,\n        across_skip_trans=dict(\n            type='conv',\n            kernel_size=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        output_trans=dict(\n            type='last_conv',\n            kernel_size=3,\n            order=('act', 'conv', 'norm'),\n            inplace=False),\n        norm_cfg=norm_cfg,\n        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])\n    FPG(in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        inter_channels=8,\n        num_outs=5,\n        add_extra_convs=True,\n        start_level=1,\n        end_level=3,\n        stack_times=9,\n        paths=['bu'] * 9,\n        same_down_trans=None,\n        same_up_trans=dict(\n            type='conv',\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        across_lateral_trans=dict(\n            type='conv',\n            kernel_size=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        across_down_trans=dict(\n            type='interpolation_conv',\n            mode='nearest',\n            kernel_size=3,\n            norm_cfg=norm_cfg,\n            order=('act', 'conv', 'norm'),\n            inplace=False),\n        across_up_trans=None,\n        across_skip_trans=dict(\n            type='conv',\n            kernel_size=1,\n            norm_cfg=norm_cfg,\n            inplace=False,\n            order=('act', 'conv', 'norm')),\n        output_trans=dict(\n            type='last_conv',\n            kernel_size=3,\n            order=('act', 'conv', 'norm'),\n            inplace=False),\n        norm_cfg=norm_cfg,\n        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])\n\n    # `end_level` is larger than len(in_channels) - 1\n    with pytest.raises(AssertionError):\n        FPG(in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            stack_times=9,\n            paths=['bu'] * 9,\n            start_level=1,\n            end_level=4,\n            num_outs=2,\n            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])\n\n    # `num_outs` is not equal to end_level - start_level + 1\n    with pytest.raises(AssertionError):\n        FPG(in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            stack_times=9,\n            paths=['bu'] * 9,\n            start_level=1,\n            end_level=2,\n            num_outs=3,\n            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])\n\n\ndef test_fpn_carafe():\n    # end_level=-1 is equal to end_level=3\n    FPN_CARAFE(\n        in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        start_level=0,\n        end_level=3,\n        num_outs=4)\n    FPN_CARAFE(\n        in_channels=[8, 16, 32, 64],\n      
  out_channels=8,\n        start_level=0,\n        end_level=-1,\n        num_outs=4)\n    # `end_level` is larger than len(in_channels) - 1\n    with pytest.raises(AssertionError):\n        FPN_CARAFE(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            start_level=1,\n            end_level=4,\n            num_outs=2)\n\n    # `num_outs` is not equal to end_level - start_level + 1\n    with pytest.raises(AssertionError):\n        FPN_CARAFE(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            start_level=1,\n            end_level=2,\n            num_outs=3)\n\n\ndef test_nas_fpn():\n    # end_level=-1 is equal to end_level=3\n    NASFPN(\n        in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        stack_times=9,\n        start_level=0,\n        end_level=3,\n        num_outs=4)\n    NASFPN(\n        in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        stack_times=9,\n        start_level=0,\n        end_level=-1,\n        num_outs=4)\n    # `end_level` is larger than len(in_channels) - 1\n    with pytest.raises(AssertionError):\n        NASFPN(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            stack_times=9,\n            start_level=1,\n            end_level=4,\n            num_outs=2)\n\n    # `num_outs` is not equal to end_level - start_level + 1\n    with pytest.raises(AssertionError):\n        NASFPN(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            stack_times=9,\n            start_level=1,\n            end_level=2,\n            num_outs=3)\n\n\ndef test_nasfcos_fpn():\n    # end_level=-1 is equal to end_level=3\n    NASFCOS_FPN(\n        in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        start_level=0,\n        end_level=3,\n        num_outs=4)\n    NASFCOS_FPN(\n        in_channels=[8, 16, 32, 64],\n        out_channels=8,\n        start_level=0,\n        end_level=-1,\n        num_outs=4)\n\n    # `end_level` is larger than len(in_channels) - 1\n    with pytest.raises(AssertionError):\n        NASFCOS_FPN(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            start_level=1,\n            end_level=4,\n            num_outs=2)\n\n    # `num_outs` is not equal to end_level - start_level + 1\n    with pytest.raises(AssertionError):\n        NASFCOS_FPN(\n            in_channels=[8, 16, 32, 64],\n            out_channels=8,\n            start_level=1,\n            end_level=2,\n            num_outs=3)\n\n\ndef test_ssh_neck():\n    \"\"\"Tests ssh.\"\"\"\n    s = 64\n    in_channels = [8, 16, 32, 64]\n    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]\n    out_channels = [16, 32, 64, 128]\n    ssh_model = SSH(\n        num_scales=4, in_channels=in_channels, out_channels=out_channels)\n\n    feats = [\n        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])\n        for i in range(len(in_channels))\n    ]\n    outs = ssh_model(feats)\n    assert len(outs) == len(feats)\n    for i in range(len(outs)):\n        assert outs[i].shape == \\\n            (1, out_channels[i], feat_sizes[i], feat_sizes[i])\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_bbox_heads/test_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.roi_heads.bbox_heads import (BBoxHead, Shared2FCBBoxHead,\n                                               Shared4Conv1FCBBoxHead)\nfrom mmdet.models.task_modules.samplers import SamplingResult\n\n\nclass TestBboxHead(TestCase):\n\n    def test_init(self):\n        # Shared2FCBBoxHead\n        bbox_head = Shared2FCBBoxHead(\n            in_channels=1, fc_out_channels=1, num_classes=4)\n        self.assertTrue(bbox_head.fc_cls)\n        self.assertTrue(bbox_head.fc_reg)\n        self.assertEqual(len(bbox_head.shared_fcs), 2)\n\n        # Shared4Conv1FCBBoxHead\n        bbox_head = Shared4Conv1FCBBoxHead(\n            in_channels=1, fc_out_channels=1, num_classes=4)\n        self.assertTrue(bbox_head.fc_cls)\n        self.assertTrue(bbox_head.fc_reg)\n        self.assertEqual(len(bbox_head.shared_convs), 4)\n        self.assertEqual(len(bbox_head.shared_fcs), 1)\n\n    def test_bbox_head_get_results(self):\n        num_classes = 6\n        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)\n        s = 128\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        num_samples = 2\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = [torch.rand((num_samples, num_classes + 1))]\n        bbox_preds = [torch.rand((num_samples, 4))]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n        self.assertEqual(len(result_list[0].scores.shape), 1)\n        self.assertEqual(len(result_list[0].labels.shape), 1)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples)\n        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)\n        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)\n        self.assertIsNone(result_list[0].get('label', None))\n\n        # num_samples is 0\n        num_samples = 0\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = [torch.rand((num_samples, num_classes + 1))]\n        bbox_preds = [torch.rand((num_samples, 4))]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        
self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)\n        self.assertIsNone(result_list[0].get('label', None))\n\n    def test_bbox_head_refine_bboxes(self):\n        num_classes = 6\n        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)\n        s = 128\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        sampling_results = [SamplingResult.random()]\n        num_samples = 20\n        rois = torch.rand((num_samples, 4))\n        roi_img_ids = torch.zeros(num_samples, 1)\n        rois = torch.cat((roi_img_ids, rois), dim=1)\n        cls_scores = torch.rand((num_samples, num_classes + 1))\n        bbox_preds = torch.rand((num_samples, 4))\n        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()\n        bbox_targets = (labels, None, None, None)\n        bbox_results = dict(\n            rois=rois,\n            bbox_pred=bbox_preds,\n            cls_score=cls_scores,\n            bbox_targets=bbox_targets)\n\n        bbox_list = bbox_head.refine_bboxes(\n            sampling_results=sampling_results,\n            bbox_results=bbox_results,\n            batch_img_metas=img_metas)\n\n        self.assertGreaterEqual(num_samples, len(bbox_list[0]))\n        self.assertIsInstance(bbox_list[0], InstanceData)\n        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_bbox_heads/test_double_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads.bbox_heads import DoubleConvFCBBoxHead\n\n\nclass TestDoubleBboxHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward_loss(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        double_bbox_head = DoubleConvFCBBoxHead(\n            num_convs=4,\n            num_fcs=2,\n            in_channels=1,\n            conv_out_channels=4,\n            fc_out_channels=4)\n        double_bbox_head = double_bbox_head.to(device=device)\n\n        num_samples = 4\n        feats = torch.rand((num_samples, 1, 7, 7)).to(device)\n        double_bbox_head(x_cls=feats, x_reg=feats)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_bbox_heads/test_multi_instance_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.roi_heads.bbox_heads import MultiInstanceBBoxHead\n\n\nclass TestMultiInstanceBBoxHead(TestCase):\n\n    def test_init(self):\n        bbox_head = MultiInstanceBBoxHead(\n            num_instance=2,\n            with_refine=True,\n            num_shared_fcs=2,\n            in_channels=1,\n            fc_out_channels=1,\n            num_classes=4)\n        self.assertTrue(bbox_head.shared_fcs_ref)\n        self.assertTrue(bbox_head.fc_reg)\n        self.assertTrue(bbox_head.fc_cls)\n        self.assertEqual(len(bbox_head.shared_fcs), 2)\n        self.assertEqual(len(bbox_head.fc_reg), 2)\n        self.assertEqual(len(bbox_head.fc_cls), 2)\n\n    def test_bbox_head_get_results(self):\n        num_classes = 1\n        num_instance = 2\n        bbox_head = MultiInstanceBBoxHead(\n            num_instance=num_instance,\n            num_shared_fcs=2,\n            reg_class_agnostic=True,\n            num_classes=num_classes)\n        s = 128\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        num_samples = 2\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = []\n        bbox_preds = []\n        for k in range(num_instance):\n            cls_scores.append(torch.rand((num_samples, num_classes + 1)))\n            bbox_preds.append(torch.rand((num_samples, 4)))\n        cls_scores = [torch.cat(cls_scores, dim=1)]\n        bbox_preds = [torch.cat(bbox_preds, dim=1)]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            nms=dict(type='nms', iou_threshold=0.5),\n            score_thr=0.01,\n            max_per_img=500)\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertLessEqual(\n            len(result_list[0]), num_samples * num_instance * num_classes)\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n        self.assertEqual(len(result_list[0].scores.shape), 1)\n        self.assertEqual(len(result_list[0].labels.shape), 1)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples * num_instance)\n        self.assertIsNone(result_list[0].get('label', None))\n\n        # num_samples is 0\n        num_samples = 0\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = []\n        bbox_preds = []\n        for k in range(num_instance):\n            cls_scores.append(torch.rand((num_samples, num_classes + 1)))\n            bbox_preds.append(torch.rand((num_samples, 4)))\n        cls_scores = [torch.cat(cls_scores, dim=1)]\n        bbox_preds = [torch.cat(bbox_preds, dim=1)]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        result_list = bbox_head.predict_by_feat(\n            
rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0 * num_instance)\n        self.assertIsNone(result_list[0].get('label', None))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_bbox_heads/test_sabl_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.roi_heads.bbox_heads import SABLHead\nfrom mmdet.models.task_modules.samplers import SamplingResult\n\n\nclass TestSABLBboxHead(TestCase):\n\n    def test_init(self):\n        bbox_head = SABLHead(\n            cls_in_channels=1,\n            cls_out_channels=1,\n            reg_in_channels=1,\n            reg_offset_out_channels=1,\n            reg_cls_out_channels=1,\n            num_classes=4)\n        self.assertTrue(bbox_head.fc_cls)\n        self.assertTrue(hasattr(bbox_head, 'reg_cls_fcs'))\n        self.assertTrue(hasattr(bbox_head, 'reg_offset_fcs'))\n        self.assertFalse(hasattr(bbox_head, 'fc_reg'))\n\n    def test_bbox_head_get_results(self):\n        num_classes = 6\n        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)\n        s = 128\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n\n        num_samples = 2\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = [torch.rand((num_samples, num_classes + 1))]\n        bbox_preds = [(torch.rand(\n            (num_samples, 28)), torch.rand((num_samples, 28)))]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n        self.assertEqual(len(result_list[0].scores.shape), 1)\n        self.assertEqual(len(result_list[0].labels.shape), 1)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples)\n        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)\n        self.assertIsNone(result_list[0].get('label', None))\n\n        # num_samples is 0\n        num_samples = 0\n        rois = [torch.rand((num_samples, 5))]\n        cls_scores = [torch.rand((num_samples, num_classes + 1))]\n        bbox_preds = [(torch.rand(\n            (num_samples, 28)), torch.rand((num_samples, 28)))]\n\n        # with nms\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100)\n        result_list = bbox_head.predict_by_feat(\n            rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas,\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertEqual(result_list[0].bboxes.shape[1], 4)\n\n        # without nms\n        result_list = bbox_head.predict_by_feat(\n            
rois=tuple(rois),\n            cls_scores=tuple(cls_scores),\n            bbox_preds=tuple(bbox_preds),\n            batch_img_metas=img_metas)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), 0)\n        self.assertIsNone(result_list[0].get('label', None))\n\n    def test_bbox_head_refine_bboxes(self):\n        num_classes = 8\n        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)\n        s = 20\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        sampling_results = [SamplingResult.random()]\n        num_samples = 20\n        rois = torch.rand((num_samples, 4))\n        roi_img_ids = torch.zeros(num_samples, 1)\n        rois = torch.cat((roi_img_ids, rois), dim=1)\n        cls_scores = torch.rand((num_samples, num_classes + 1))\n        cls_preds = torch.rand((num_samples, 28))\n        offset_preds = torch.rand((num_samples, 28))\n        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()\n        bbox_targets = (labels, None, None, None)\n        bbox_results = dict(\n            rois=rois,\n            bbox_pred=(cls_preds, offset_preds),\n            cls_score=cls_scores,\n            bbox_targets=bbox_targets)\n\n        bbox_list = bbox_head.refine_bboxes(\n            sampling_results=sampling_results,\n            bbox_results=bbox_results,\n            batch_img_metas=img_metas)\n\n        self.assertGreaterEqual(num_samples, len(bbox_list[0]))\n        self.assertIsInstance(bbox_list[0], InstanceData)\n        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_bbox_heads/test_scnet_bbox_head.py",
    "content": "import unittest\n\nimport torch\n\nfrom mmdet.models.roi_heads.bbox_heads import SCNetBBoxHead\n\n\nclass TestSCNetBBoxHead(unittest.TestCase):\n\n    def test_forward(self):\n        x = torch.rand((2, 1, 16, 16))\n        bbox_head = SCNetBBoxHead(\n            num_shared_fcs=2,\n            in_channels=1,\n            roi_feat_size=16,\n            conv_out_channels=1,\n            fc_out_channels=256,\n        )\n        results = bbox_head(x, return_shared_feat=False)\n        self.assertEqual(len(results), 2)\n        results = bbox_head(x, return_shared_feat=True)\n        self.assertEqual(len(results), 3)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_cascade_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads import StandardRoIHead  # noqa\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\n\n\nclass TestCascadeRoIHead(TestCase):\n\n    @parameterized.expand(\n        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        \"\"\"Test init standard RoI head.\"\"\"\n        # Normal Cascade Mask R-CNN RoI head\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        assert roi_head.with_bbox\n        assert roi_head.with_mask\n\n    @parameterized.expand(\n        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])\n    def test_cascade_roi_head_loss(self, cfg_file):\n        \"\"\"Tests standard roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box and mask loss.\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss_cls' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n            elif 'loss_bbox' in name or 'loss_mask' in name:\n                self.assertEqual(value.sum(), 0)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_dynamic_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDynamicRoIHead(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n        self.roi_head_cfg = get_roi_head_cfg(\n            'dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py')\n\n    def test_init(self):\n        roi_head = MODELS.build(self.roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_dynamic_roi_head_loss(self, device):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if not torch.cuda.is_available() and device == 'cuda':\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.to(device=device)\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device=device))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss_cls = out['loss_cls']\n        loss_bbox = out['loss_bbox']\n        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')\n        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')\n\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_cls_loss = out['loss_cls']\n        empty_bbox_loss = out['loss_bbox']\n        self.assertGreater(empty_cls_loss.sum(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_bbox_loss.sum(), 0,\n            'there should be no box loss when there are no true boxes')\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_grid_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestGridRoIHead(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n        self.roi_head_cfg = get_roi_head_cfg(\n            'grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py')\n\n    def test_init(self):\n        roi_head = MODELS.build(self.roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_grid_roi_head_loss(self, device):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.to(device=device)\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device=device))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss_cls = out['loss_cls']\n        loss_grid = out['loss_grid']\n        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')\n        self.assertGreater(loss_grid.sum(), 0, 'grid loss should be non-zero')\n\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_cls_loss = out['loss_cls']\n        self.assertGreater(empty_cls_loss.sum(), 0,\n                           'cls loss should be non-zero')\n        self.assertNotIn(\n            'loss_grid', out,\n            'grid loss should be passed when there are no true boxes')\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_grid_roi_head_predict(self, device):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.to(device=device)\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device=device))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            
image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        roi_head.predict(feats, proposals_list, batch_data_samples)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_grid_roi_head_forward(self, device):\n        \"\"\"Tests trident roi head forward.\"\"\"\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.to(device=device)\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device=device))\n\n        image_shapes = [(3, s, s)]\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        roi_head.forward(feats, proposals_list)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_htc_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads import HybridTaskCascadeRoIHead  # noqa\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\n\n\nclass TestHTCRoIHead(TestCase):\n\n    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        \"\"\"Test init htc RoI head.\"\"\"\n        # Normal HTC RoI head\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        assert roi_head.with_bbox\n        assert roi_head.with_mask\n        assert roi_head.with_semantic\n\n    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])\n    def test_htc_roi_head_loss(self, cfg_file):\n        \"\"\"Tests htc roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            with_semantic=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box and mask loss.\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            with_semantic=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss_cls' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n            elif 'loss_bbox' in name or 'loss_mask' in name:\n                self.assertEqual(value.sum(), 0)\n\n    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])\n    def test_htc_roi_head_predict(self, cfg_file):\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n 
       s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        results = roi_head.predict(\n            feats, proposal_list, batch_data_samples, rescale=True)\n        self.assertEqual(results[0].masks.shape[-2:], (s, s))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_coarse_mask_head.py",
    "content": "import unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads.mask_heads import CoarseMaskHead\n\n\nclass TestCoarseMaskHead(unittest.TestCase):\n\n    def test_init(self):\n        with self.assertRaises(AssertionError):\n            CoarseMaskHead(num_fcs=0)\n\n        with self.assertRaises(AssertionError):\n            CoarseMaskHead(downsample_factor=0.5)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        x = torch.rand((1, 32, 7, 7)).to(device)\n        mask_head = CoarseMaskHead(\n            downsample_factor=2,\n            in_channels=32,\n            conv_out_channels=32,\n            roi_feat_size=7).to(device)\n        mask_head.init_weights()\n        res = mask_head(x)\n        self.assertEqual(res.shape[-2:], (3, 3))\n\n        mask_head = CoarseMaskHead(\n            downsample_factor=1,\n            in_channels=32,\n            conv_out_channels=32,\n            roi_feat_size=7).to(device)\n        mask_head.init_weights()\n        res = mask_head(x)\n        self.assertEqual(res.shape[-2:], (7, 7))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_fcn_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads.mask_heads import FCNMaskHead\n\n\nclass TestFCNMaskHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_get_seg_masks(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n        num_classes = 6\n        mask_head = FCNMaskHead(\n            num_convs=1,\n            in_channels=1,\n            conv_out_channels=1,\n            num_classes=num_classes)\n        rcnn_test_cfg = ConfigDict(\n            score_thr=0.05,\n            nms=dict(type='nms', iou_threshold=0.5),\n            max_per_img=100,\n            mask_thr_binary=0.5)\n        s = 128\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'scale_factor': (1, 1),\n            'ori_shape': (s, s, 3)\n        }\n        result = InstanceData(metainfo=img_metas)\n\n        num_samples = 2\n        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]\n        result.bboxes = torch.rand((num_samples, 4)).to(device)\n        result.labels = torch.randint(\n            num_classes, (num_samples, ), dtype=torch.long).to(device)\n        mask_head.to(device=device)\n        result_list = mask_head.predict_by_feat(\n            mask_preds=tuple(mask_pred),\n            results_list=[result],\n            batch_img_metas=[img_metas],\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples)\n        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))\n\n        # test with activate_map, `mask_pred` has been activated before\n        num_samples = 2\n        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]\n        mask_pred = [m.sigmoid().detach() for m in mask_pred]\n        result.bboxes = torch.rand((num_samples, 4)).to(device)\n        result.labels = torch.randint(\n            num_classes, (num_samples, ), dtype=torch.long).to(device)\n        mask_head.to(device=device)\n        result_list = mask_head.predict_by_feat(\n            mask_preds=tuple(mask_pred),\n            results_list=[result],\n            batch_img_metas=[img_metas],\n            rcnn_test_cfg=rcnn_test_cfg,\n            activate_map=True)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples)\n        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))\n\n        # num_samples is 0\n        num_samples = 0\n        result = InstanceData(metainfo=img_metas)\n        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]\n        result.bboxes = torch.zeros((num_samples, 4)).to(device)\n        result.labels = torch.zeros((num_samples, )).to(device)\n        result_list = mask_head.predict_by_feat(\n            mask_preds=tuple(mask_pred),\n            results_list=[result],\n            batch_img_metas=[img_metas],\n            rcnn_test_cfg=rcnn_test_cfg)\n\n        self.assertIsInstance(result_list[0], InstanceData)\n        self.assertEqual(len(result_list[0]), num_samples)\n        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_feature_relay_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import FeatureRelayHead\n\n\nclass TestFeatureRelayHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n        mask_head = FeatureRelayHead(in_channels=10, out_conv_channels=10)\n\n        x = torch.rand((1, 10))\n        results = mask_head(x)\n        self.assertIsInstance(results, Tensor)\n        x = torch.empty((0, 10))\n        results = mask_head(x)\n        self.assertEqual(results, None)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_fused_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import FusedSemanticHead\n\n\nclass TestFusedSemanticHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward_loss(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        semantic_head = FusedSemanticHead(\n            num_ins=5,\n            fusion_level=1,\n            in_channels=4,\n            conv_out_channels=4,\n            num_classes=6)\n        feats = [\n            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1)))\n            for i in range(5)\n        ]\n        mask_pred, x = semantic_head(feats)\n        labels = torch.randint(0, 6, (1, 1, 64, 64))\n        loss = semantic_head.loss(mask_pred, labels)\n        self.assertIsInstance(loss, Tensor)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_global_context_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import GlobalContextHead\n\n\nclass TestGlobalContextHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward_loss(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        head = GlobalContextHead(\n            num_convs=1, in_channels=4, conv_out_channels=4, num_classes=10)\n        feats = [\n            torch.rand((1, 4, 64 // 2**(i + 1), 64 // 2**(i + 1)))\n            for i in range(5)\n        ]\n        mc_pred, x = head(feats)\n\n        labels = [torch.randint(0, 10, (10, ))]\n        loss = head.loss(mc_pred, labels)\n        self.assertIsInstance(loss, Tensor)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_grid_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads.mask_heads import GridHead\nfrom mmdet.models.utils import unpack_gt_instances\nfrom mmdet.testing import (demo_mm_inputs, demo_mm_proposals,\n                           demo_mm_sampling_results)\n\n\nclass TestGridHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_grid_head_loss(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        grid_head = GridHead()\n        grid_head.to(device=device)\n\n        s = 256\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n\n        train_cfg = ConfigDict(dict(pos_radius=1))\n\n        # prepare ground truth\n        (batch_gt_instances, batch_gt_instances_ignore,\n         _) = unpack_gt_instances(batch_data_samples)\n        sampling_results = demo_mm_sampling_results(\n            proposals_list=proposals_list,\n            batch_gt_instances=batch_gt_instances,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        # prepare grid feats\n        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results])\n        grid_feats = torch.rand((pos_bboxes.size(0), 256, 14, 14)).to(device)\n        sample_idx = torch.arange(0, pos_bboxes.size(0))\n        grid_pred = grid_head(grid_feats)\n\n        grid_head.loss(grid_pred, sample_idx, sampling_results, train_cfg)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_mask_iou_head_predict_by_feat(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        grid_head = GridHead()\n        grid_head.to(device=device)\n\n        s = 128\n        num_samples = 2\n        num_classes = 4\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'scale_factor': (1, 1),\n            'ori_shape': (s, s, 3)\n        }\n        results = InstanceData(metainfo=img_metas)\n        results.bboxes = torch.rand((num_samples, 4)).to(device)\n        results.scores = torch.rand((num_samples, )).to(device)\n        results.labels = torch.randint(\n            num_classes, (num_samples, ), dtype=torch.long).to(device)\n\n        grid_feats = torch.rand((num_samples, 256, 14, 14)).to(device)\n        grid_preds = grid_head(grid_feats)\n        grid_head.predict_by_feat(\n            grid_preds=grid_preds,\n            results_list=[results],\n            batch_img_metas=[img_metas])\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_htc_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import HTCMaskHead\n\n\nclass TestHTCMaskHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n        num_classes = 6\n        mask_head = HTCMaskHead(\n            with_conv_res=True,\n            num_convs=1,\n            in_channels=1,\n            conv_out_channels=1,\n            num_classes=num_classes)\n\n        x = torch.rand((1, 1, 10, 10))\n        res_feat = torch.rand((1, 1, 10, 10))\n\n        with self.assertRaises(AssertionError):\n            mask_head(x, return_logits=False, return_feat=False)\n\n        results = mask_head(x)\n        self.assertEqual(len(results), 2)\n        results = mask_head(x, res_feat=res_feat)\n        self.assertEqual(len(results), 2)\n\n        results = mask_head(x, return_logits=False)\n        self.assertIsInstance(results, Tensor)\n        results = mask_head(x, return_feat=False)\n        self.assertIsInstance(results, Tensor)\n\n        results = mask_head(x, res_feat=res_feat, return_logits=False)\n        self.assertIsInstance(results, Tensor)\n        results = mask_head(x, res_feat=res_feat, return_feat=False)\n        self.assertIsInstance(results, Tensor)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_maskiou_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads.mask_heads import MaskIoUHead\nfrom mmdet.models.utils import unpack_gt_instances\nfrom mmdet.structures.mask import mask_target\nfrom mmdet.testing import (demo_mm_inputs, demo_mm_proposals,\n                           demo_mm_sampling_results)\n\n\nclass TestMaskIoUHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_mask_iou_head_loss_and_target(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        mask_iou_head = MaskIoUHead(num_classes=4)\n        mask_iou_head.to(device=device)\n\n        s = 256\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        train_cfg = ConfigDict(dict(mask_size=28, mask_thr_binary=0.5))\n\n        # prepare ground truth\n        (batch_gt_instances, batch_gt_instances_ignore,\n         _) = unpack_gt_instances(batch_data_samples)\n        sampling_results = demo_mm_sampling_results(\n            proposals_list=proposals_list,\n            batch_gt_instances=batch_gt_instances,\n            batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n        # prepare mask feats, pred and target\n        pos_proposals = [res.pos_priors for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        gt_masks = [res.masks for res in batch_gt_instances]\n        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n                                   gt_masks, train_cfg)\n        mask_feats = torch.rand((mask_targets.size(0), 256, 14, 14)).to(device)\n        mask_preds = torch.rand((mask_targets.size(0), 4, 28, 28)).to(device)\n\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        pos_mask_pred = mask_preds[range(mask_preds.size(0)), pos_labels]\n        mask_iou_pred = mask_iou_head(mask_feats, pos_mask_pred)\n        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),\n                                          pos_labels]\n\n        mask_iou_head.loss_and_target(pos_mask_iou_pred, pos_mask_pred,\n                                      mask_targets, sampling_results,\n                                      batch_gt_instances, train_cfg)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_mask_iou_head_predict_by_feat(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        mask_iou_head = MaskIoUHead(num_classes=4)\n        mask_iou_head.to(device=device)\n\n        s = 128\n        num_samples = 2\n        num_classes = 4\n        img_metas = {\n            'img_shape': (s, s, 3),\n            'scale_factor': (1, 1),\n            'ori_shape': (s, s, 3)\n        }\n        results = InstanceData(metainfo=img_metas)\n        
results.bboxes = torch.rand((num_samples, 4)).to(device)\n        results.scores = torch.rand((num_samples, )).to(device)\n        results.labels = torch.randint(\n            num_classes, (num_samples, ), dtype=torch.long).to(device)\n\n        mask_feats = torch.rand((num_samples, 256, 14, 14)).to(device)\n        mask_preds = torch.rand((num_samples, num_classes, 28, 28)).to(device)\n        mask_iou_preds = mask_iou_head(\n            mask_feats, mask_preds[range(results.labels.size(0)),\n                                   results.labels])\n\n        mask_iou_head.predict_by_feat(\n            mask_iou_preds=[mask_iou_preds], results_list=[results])\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_scnet_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import SCNetMaskHead\n\n\nclass TestSCNetMaskHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n        num_classes = 6\n        mask_head = SCNetMaskHead(\n            conv_to_res=True,\n            num_convs=1,\n            in_channels=1,\n            conv_out_channels=1,\n            num_classes=num_classes)\n\n        x = torch.rand((1, 1, 10, 10))\n        results = mask_head(x)\n        self.assertIsInstance(results, Tensor)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_heads/test_scnet_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\nfrom torch import Tensor\n\nfrom mmdet.models.roi_heads.mask_heads import SCNetSemanticHead\n\n\nclass TestSCNetSemanticHead(TestCase):\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_forward_loss(self, device):\n        if device == 'cuda':\n            if not torch.cuda.is_available():\n                return unittest.skip('test requires GPU and torch+cuda')\n\n        semantic_head = SCNetSemanticHead(\n            num_ins=5,\n            fusion_level=1,\n            in_channels=4,\n            conv_out_channels=4,\n            num_classes=6)\n        feats = [\n            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1)))\n            for i in range(5)\n        ]\n        mask_pred, x = semantic_head(feats)\n        labels = torch.randint(0, 6, (1, 1, 64, 64))\n        loss = semantic_head.loss(mask_pred, labels)\n        self.assertIsInstance(loss, Tensor)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_mask_scoring_roI_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestMaskScoringRoiHead(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n        self.roi_head_cfg = get_roi_head_cfg(\n            'ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py')\n\n    def test_init(self):\n        roi_head = MODELS.build(self.roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n        self.assertTrue(roi_head.with_mask)\n        self.assertTrue(roi_head.mask_iou_head)\n\n    def test_mask_scoring_roi_head_loss(self):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.cuda()\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss_cls = out['loss_cls']\n        loss_bbox = out['loss_bbox']\n        loss_mask = out['loss_mask']\n        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')\n        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')\n        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')\n\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_cls_loss = out['loss_cls']\n        empty_bbox_loss = out['loss_bbox']\n        empty_mask_loss = out['loss_mask']\n        self.assertGreater(empty_cls_loss.sum(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_bbox_loss.sum(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_mask_loss.sum(), 0,\n            'there should be no mask loss when there are no true boxes')\n\n    def test_mask_scoring_roi_head_predict(self):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.cuda()\n        s = 256\n        feats = []\n        for i in 
range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n        roi_head.predict(feats, proposals_list, batch_data_samples)\n\n    def test_mask_scoring_roi_head_forward(self):\n        \"\"\"Tests mask scoring roi head forward.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.cuda()\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n\n        image_shapes = [(3, s, s)]\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n        roi_head.forward(feats, proposals_list)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_multi_instance_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import Config\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\ndef _fake_roi_head():\n    \"\"\"Set a fake roi head config.\"\"\"\n\n    roi_head = Config(\n        dict(\n            type='MultiInstanceRoIHead',\n            bbox_roi_extractor=dict(\n                type='SingleRoIExtractor',\n                roi_layer=dict(\n                    type='RoIAlign',\n                    output_size=7,\n                    sampling_ratio=-1,\n                    aligned=True,\n                    use_torchvision=True),\n                out_channels=256,\n                featmap_strides=[4, 8, 16, 32]),\n            bbox_head=dict(\n                type='MultiInstanceBBoxHead',\n                with_refine=False,\n                num_shared_fcs=2,\n                in_channels=256,\n                fc_out_channels=1024,\n                roi_feat_size=7,\n                num_classes=1,\n                bbox_coder=dict(\n                    type='DeltaXYWHBBoxCoder',\n                    target_means=[0., 0., 0., 0.],\n                    target_stds=[0.1, 0.1, 0.2, 0.2]),\n                reg_class_agnostic=False,\n                loss_cls=dict(\n                    type='CrossEntropyLoss',\n                    loss_weight=1.0,\n                    use_sigmoid=False,\n                    reduction='none'),\n                loss_bbox=dict(\n                    type='SmoothL1Loss', loss_weight=1.0, reduction='none')),\n            train_cfg=dict(\n                assigner=dict(\n                    type='MultiInstanceAssigner',\n                    pos_iou_thr=0.5,\n                    neg_iou_thr=0.5,\n                    min_pos_iou=0.3,\n                    match_low_quality=False,\n                    ignore_iof_thr=-1),\n                sampler=dict(\n                    type='MultiInsRandomSampler',\n                    num=512,\n                    pos_fraction=0.5,\n                    neg_pos_ub=-1,\n                    add_gt_as_proposals=False),\n                pos_weight=-1,\n                debug=False),\n            test_cfg=dict(\n                nms=dict(iou_threshold=0.5), score_thr=0.01, max_per_img=500)))\n\n    return roi_head\n\n\nclass TestMultiInstanceRoIHead(TestCase):\n\n    def test_init(self):\n        \"\"\"Test init multi instance RoI head.\"\"\"\n        roi_head_cfg = _fake_roi_head()\n        roi_head = MODELS.build(roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n\n    def test_standard_roi_head_loss(self):\n        \"\"\"Tests multi instance roi head loss when truth is empty and non-\n        empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        roi_head_cfg = _fake_roi_head()\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then emd loss should be nonzero for\n        # random inputs\n        
image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=False,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss = out['loss_rcnn_emd']\n        self.assertGreater(loss.sum(), 0, 'loss should be non-zero')\n\n        # When there is no truth, the emd loss should be zero.\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_loss = out['loss_rcnn_emd']\n        self.assertEqual(\n            empty_loss.sum(), 0,\n            'there should be no emd loss when there are no true boxes')\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_pisa_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestPISARoIHead(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n        self.roi_head_cfg = get_roi_head_cfg(\n            'pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py')\n\n    def test_init(self):\n        roi_head = MODELS.build(self.roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n\n    @parameterized.expand(['cpu', 'cuda'])\n    def test_pisa_roi_head(self, device):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if not torch.cuda.is_available() and device == 'cuda':\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        roi_head = MODELS.build(self.roi_head_cfg)\n        roi_head = roi_head.to(device=device)\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device=device))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss_cls = out['loss_cls']\n        loss_bbox = out['loss_bbox']\n        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')\n        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')\n\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device=device)['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device=device)\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_cls_loss = out['loss_cls']\n        empty_bbox_loss = out['loss_bbox']\n        self.assertGreater(empty_cls_loss.sum(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_bbox_loss.sum(), 0,\n            'there should be no box loss when there are no true boxes')\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_point_rend_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads import PointRendRoIHead  # noqa\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\n\n\nclass TestHTCRoIHead(TestCase):\n\n    @parameterized.expand(\n        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])\n    def test_init(self, cfg_file):\n        \"\"\"Test init Point rend RoI head.\"\"\"\n        # Normal HTC RoI head\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        assert roi_head.with_bbox\n        assert roi_head.with_mask\n\n    @parameterized.expand(\n        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])\n    def test_point_rend_roi_head_loss(self, cfg_file):\n        \"\"\"Tests htc roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n\n        # Positive rois must not be empty\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        with self.assertRaises(AssertionError):\n            out = roi_head.loss(feats, proposal_list, batch_data_samples)\n\n    @parameterized.expand(\n        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])\n    def test_point_rend_roi_head_predict(self, cfg_file):\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in 
range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        results = roi_head.predict(\n            feats, proposal_list, batch_data_samples, rescale=True)\n        self.assertEqual(results[0].masks.shape[-2:], (s, s))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_roi_extractors/test_generic_roi_extractor.py",
    "content": "import unittest\n\nimport torch\n\nfrom mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor\n\n\nclass TestGenericRoIExtractor(unittest.TestCase):\n\n    def test_init(self):\n        with self.assertRaises(AssertionError):\n            GenericRoIExtractor(\n                aggregation='other',\n                roi_layer=dict(\n                    type='RoIAlign', output_size=7, sampling_ratio=2),\n                out_channels=16,\n                featmap_strides=[4, 8, 16, 32])\n\n        roi_extractor = GenericRoIExtractor(\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=16,\n            featmap_strides=[4, 8, 16, 32])\n        self.assertFalse(roi_extractor.with_pre)\n        self.assertFalse(roi_extractor.with_post)\n\n    def test_forward(self):\n        # test with pre/post\n        cfg = dict(\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=16,\n            featmap_strides=[4, 8, 16, 32],\n            pre_cfg=dict(\n                type='ConvModule',\n                in_channels=16,\n                out_channels=16,\n                kernel_size=5,\n                padding=2,\n                inplace=False),\n            post_cfg=dict(\n                type='ConvModule',\n                in_channels=16,\n                out_channels=16,\n                kernel_size=5,\n                padding=2,\n                inplace=False))\n        roi_extractor = GenericRoIExtractor(**cfg)\n\n        # empty rois\n        feats = (\n            torch.rand((1, 16, 200, 336)),\n            torch.rand((1, 16, 100, 168)),\n        )\n        rois = torch.empty((0, 5), dtype=torch.float32)\n        res = roi_extractor(feats, rois)\n        self.assertEqual(len(res), 0)\n\n        # single scale feature\n        rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])\n        feats = (torch.rand((1, 16, 200, 336)), )\n        res = roi_extractor(feats, rois)\n        self.assertEqual(res.shape, (1, 16, 7, 7))\n\n        # multi-scale features\n        feats = (\n            torch.rand((1, 16, 200, 336)),\n            torch.rand((1, 16, 100, 168)),\n            torch.rand((1, 16, 50, 84)),\n            torch.rand((1, 16, 25, 42)),\n        )\n        res = roi_extractor(feats, rois)\n        self.assertEqual(res.shape, (1, 16, 7, 7))\n\n        # test w.o. pre/post concat\n        cfg = dict(\n            aggregation='concat',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=16 * 4,\n            featmap_strides=[4, 8, 16, 32])\n\n        roi_extractor = GenericRoIExtractor(**cfg)\n        res = roi_extractor(feats, rois)\n        self.assertEqual(res.shape, (1, 64, 7, 7))\n\n        # test concat channels number\n        cfg = dict(\n            aggregation='concat',\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=256 * 5,  # 256*5 != 256*4\n            featmap_strides=[4, 8, 16, 32])\n\n        roi_extractor = GenericRoIExtractor(**cfg)\n\n        feats = (\n            torch.rand((1, 256, 200, 336)),\n            torch.rand((1, 256, 100, 168)),\n            torch.rand((1, 256, 50, 84)),\n            torch.rand((1, 256, 25, 42)),\n        )\n        # out_channels does not sum of feat channels\n        with self.assertRaises(AssertionError):\n            roi_extractor(feats, rois)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_roi_extractors/test_single_level_roi_extractor.py",
    "content": "import unittest\n\nimport torch\n\nfrom mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor\n\n\nclass TestSingleRoIExtractor(unittest.TestCase):\n\n    def test_forward(self):\n        cfg = dict(\n            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),\n            out_channels=16,\n            featmap_strides=[4, 8, 16, 32])\n        roi_extractor = SingleRoIExtractor(**cfg)\n\n        # empty rois\n        feats = (torch.rand((1, 16, 200, 336)), )\n        rois = torch.empty((0, 5), dtype=torch.float32)\n        res = roi_extractor(feats, rois)\n        self.assertEqual(len(res), 0)\n\n        # single scale feature\n        rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])\n        res = roi_extractor(feats, rois)\n        self.assertEqual(res.shape, (1, 16, 7, 7))\n\n        # multi-scale features\n        feats = (\n            torch.rand((1, 16, 200, 336)),\n            torch.rand((1, 16, 100, 168)),\n            torch.rand((1, 16, 50, 84)),\n            torch.rand((1, 16, 25, 42)),\n        )\n        res = roi_extractor(feats, rois)\n        self.assertEqual(res.shape, (1, 16, 7, 7))\n\n        res = roi_extractor(feats, rois, roi_scale_factor=2.0)\n        self.assertEqual(res.shape, (1, 16, 7, 7))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_scnet_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads import SCNetRoIHead  # noqa\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\n\n\nclass TestSCNetRoIHead(TestCase):\n\n    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        \"\"\"Test init scnet RoI head.\"\"\"\n        # Normal Cascade Mask R-CNN RoI head\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        assert roi_head.with_bbox\n        assert roi_head.with_mask\n        assert roi_head.with_semantic\n        assert roi_head.with_feat_relay\n        assert roi_head.with_glbctx\n\n    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])\n    def test_scnet_roi_head_loss(self, cfg_file):\n        \"\"\"Tests htc roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            with_semantic=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box and mask loss.\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            with_semantic=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss_cls' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n            elif 'loss_bbox' in name or 'loss_mask' in name:\n                self.assertEqual(value.sum(), 0)\n\n    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])\n    def test_scnet_roi_head_predict(self, cfg_file):\n        if not torch.cuda.is_available():\n            # 
RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 256, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        results = roi_head.predict(\n            feats, proposal_list, batch_data_samples, rescale=True)\n        self.assertEqual(results[0].masks.shape[-2:], (s, s))\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_sparse_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nimport torch.nn as nn\nfrom parameterized import parameterized\n\nfrom mmdet.models.roi_heads import StandardRoIHead  # noqa\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\n\n\nclass TestCascadeRoIHead(TestCase):\n\n    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])\n    def test_init(self, cfg_file):\n        \"\"\"Test init standard RoI head.\"\"\"\n        # Normal Cascade Mask R-CNN RoI head\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head.init_weights()\n        assert roi_head.with_bbox\n        assert roi_head.with_mask\n\n    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])\n    def test_cascade_roi_head_loss(self, cfg_file):\n        \"\"\"Tests standard roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        img_metas = [{\n            'img_shape': (s, s, 3),\n            'scale_factor': 1,\n        }]\n        roi_head_cfg = get_roi_head_cfg(cfg_file)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 1, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        img_shape_list = [(3, s, s) for _ in img_metas]\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        # add import elements into proposal\n        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()\n        for proposal in proposal_list:\n            proposal.features = init_proposal_features\n            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,\n                                                       s]]).repeat(100, 1)\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box and mask loss.\n        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')\n        # add import elements into proposal\n        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()\n        for proposal in proposal_list:\n            proposal.features = init_proposal_features\n            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,\n                                                       s]]).repeat(100, 1)\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=[(3, s, s)],\n            num_items=[0],\n            
num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        out = roi_head.loss(feats, proposal_list, batch_data_samples)\n        for name, value in out.items():\n            if 'loss_cls' in name:\n                self.assertGreaterEqual(\n                    value.sum(), 0, msg='loss should be non-zero')\n            elif 'loss_bbox' in name or 'loss_mask' in name:\n                self.assertEqual(value.sum(), 0)\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_standard_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import Config\nfrom parameterized import parameterized\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals\nfrom mmdet.utils import register_all_modules\n\nregister_all_modules()\n\n\ndef _fake_roi_head(with_shared_head=False):\n    \"\"\"Set a fake roi head config.\"\"\"\n    if not with_shared_head:\n        roi_head = Config(\n            dict(\n                type='StandardRoIHead',\n                bbox_roi_extractor=dict(\n                    type='SingleRoIExtractor',\n                    roi_layer=dict(\n                        type='RoIAlign', output_size=7, sampling_ratio=0),\n                    out_channels=1,\n                    featmap_strides=[4, 8, 16, 32]),\n                bbox_head=dict(\n                    type='Shared2FCBBoxHead',\n                    in_channels=1,\n                    fc_out_channels=1,\n                    num_classes=4),\n                mask_roi_extractor=dict(\n                    type='SingleRoIExtractor',\n                    roi_layer=dict(\n                        type='RoIAlign', output_size=14, sampling_ratio=0),\n                    out_channels=1,\n                    featmap_strides=[4, 8, 16, 32]),\n                mask_head=dict(\n                    type='FCNMaskHead',\n                    num_convs=1,\n                    in_channels=1,\n                    conv_out_channels=1,\n                    num_classes=4),\n                train_cfg=dict(\n                    assigner=dict(\n                        type='MaxIoUAssigner',\n                        pos_iou_thr=0.5,\n                        neg_iou_thr=0.5,\n                        min_pos_iou=0.5,\n                        match_low_quality=True,\n                        ignore_iof_thr=-1),\n                    sampler=dict(\n                        type='RandomSampler',\n                        num=512,\n                        pos_fraction=0.25,\n                        neg_pos_ub=-1,\n                        add_gt_as_proposals=True),\n                    mask_size=28,\n                    pos_weight=-1,\n                    debug=False),\n                test_cfg=dict(\n                    score_thr=0.05,\n                    nms=dict(type='nms', iou_threshold=0.5),\n                    max_per_img=100,\n                    mask_thr_binary=0.5)))\n    else:\n        roi_head = Config(\n            dict(\n                type='StandardRoIHead',\n                shared_head=dict(\n                    type='ResLayer',\n                    depth=50,\n                    stage=3,\n                    stride=2,\n                    dilation=1,\n                    style='caffe',\n                    norm_cfg=dict(type='BN', requires_grad=False),\n                    norm_eval=True),\n                bbox_roi_extractor=dict(\n                    type='SingleRoIExtractor',\n                    roi_layer=dict(\n                        type='RoIAlign', output_size=14, sampling_ratio=0),\n                    out_channels=1,\n                    featmap_strides=[16]),\n                bbox_head=dict(\n                    type='BBoxHead',\n                    with_avg_pool=True,\n                    in_channels=2048,\n                    roi_feat_size=7,\n                    num_classes=4),\n                mask_roi_extractor=None,\n                mask_head=dict(\n                    
type='FCNMaskHead',\n                    num_convs=0,\n                    in_channels=2048,\n                    conv_out_channels=1,\n                    num_classes=4),\n                train_cfg=dict(\n                    assigner=dict(\n                        type='MaxIoUAssigner',\n                        pos_iou_thr=0.5,\n                        neg_iou_thr=0.5,\n                        min_pos_iou=0.5,\n                        match_low_quality=False,\n                        ignore_iof_thr=-1),\n                    sampler=dict(\n                        type='RandomSampler',\n                        num=512,\n                        pos_fraction=0.25,\n                        neg_pos_ub=-1,\n                        add_gt_as_proposals=True),\n                    mask_size=14,\n                    pos_weight=-1,\n                    debug=False),\n                test_cfg=dict(\n                    score_thr=0.05,\n                    nms=dict(type='nms', iou_threshold=0.5),\n                    max_per_img=100,\n                    mask_thr_binary=0.5)))\n    return roi_head\n\n\nclass TestStandardRoIHead(TestCase):\n\n    def test_init(self):\n        \"\"\"Test init standard RoI head.\"\"\"\n        # Normal Mask R-CNN RoI head\n        roi_head_cfg = _fake_roi_head()\n        roi_head = MODELS.build(roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n        self.assertTrue(roi_head.with_mask)\n\n        # Mask R-CNN RoI head with shared_head\n        roi_head_cfg = _fake_roi_head(with_shared_head=True)\n        roi_head = MODELS.build(roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n        self.assertTrue(roi_head.with_mask)\n        self.assertTrue(roi_head.with_shared_head)\n\n    @parameterized.expand([(False, ), (True, )])\n    def test_standard_roi_head_loss(self, with_shared_head):\n        \"\"\"Tests standard roi head loss when truth is empty and non-empty.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n        s = 256\n        roi_head_cfg = _fake_roi_head(with_shared_head=with_shared_head)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            if not with_shared_head:\n                feats.append(\n                    torch.rand(1, 1, s // (2**(i + 2)),\n                               s // (2**(i + 2))).to(device='cuda'))\n            else:\n                feats.append(\n                    torch.rand(1, 1024, s // (2**(i + 2)),\n                               s // (2**(i + 2))).to(device='cuda'))\n        feats = tuple(feats)\n\n        # When truth is non-empty then both cls, box, and mask loss\n        # should be nonzero for random inputs\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[1],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        loss_cls = out['loss_cls']\n        loss_bbox = out['loss_bbox']\n        loss_mask = out['loss_mask']\n        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')\n  
      self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')\n        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')\n\n        # When there is no truth, the cls loss should be nonzero but\n        # there should be no box and mask loss.\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n        out = roi_head.loss(feats, proposals_list, batch_data_samples)\n        empty_cls_loss = out['loss_cls']\n        empty_bbox_loss = out['loss_bbox']\n        empty_mask_loss = out['loss_mask']\n        self.assertGreater(empty_cls_loss.sum(), 0,\n                           'cls loss should be non-zero')\n        self.assertEqual(\n            empty_bbox_loss.sum(), 0,\n            'there should be no box loss when there are no true boxes')\n        self.assertEqual(\n            empty_mask_loss.sum(), 0,\n            'there should be no mask loss when there are no true boxes')\n"
  },
  {
    "path": "tests/test_models/test_roi_heads/test_trident_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport unittest\nfrom unittest import TestCase\n\nimport torch\n\nfrom mmdet.registry import MODELS\nfrom mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestTridentRoIHead(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n        self.roi_head_cfg = get_roi_head_cfg(\n            'tridentnet/tridentnet_r50-caffe_1x_coco.py')\n\n    def test_init(self):\n\n        roi_head = MODELS.build(self.roi_head_cfg)\n        self.assertTrue(roi_head.with_bbox)\n        self.assertTrue(roi_head.with_shared_head)\n\n    def test_trident_roi_head_predict(self):\n        \"\"\"Tests trident roi head predict.\"\"\"\n        if not torch.cuda.is_available():\n            # RoI pooling only support in GPU\n            return unittest.skip('test requires GPU and torch+cuda')\n\n        roi_head_cfg = copy.deepcopy(self.roi_head_cfg)\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        s = 256\n        feats = []\n        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):\n            feats.append(\n                torch.rand(1, 1024, s // (2**(i + 2)),\n                           s // (2**(i + 2))).to(device='cuda'))\n\n        image_shapes = [(3, s, s)]\n        batch_data_samples = demo_mm_inputs(\n            batch_size=1,\n            image_shapes=image_shapes,\n            num_items=[0],\n            num_classes=4,\n            with_mask=True,\n            device='cuda')['data_samples']\n        proposals_list = demo_mm_proposals(\n            image_shapes=image_shapes, num_proposals=100, device='cuda')\n        # When `test_branch_idx == 1`\n        roi_head.predict(feats, proposals_list, batch_data_samples)\n        # When `test_branch_idx == -1`\n        roi_head_cfg.test_branch_idx = -1\n        roi_head = MODELS.build(roi_head_cfg)\n        roi_head = roi_head.cuda()\n        roi_head.predict(feats, proposals_list, batch_data_samples)\n"
  },
  {
    "path": "tests/test_models/test_seg_heads/test_heuristic_fusion_head.py",
    "content": "import unittest\n\nimport torch\nfrom mmengine.config import Config\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.evaluation import INSTANCE_OFFSET\nfrom mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead\n\n\nclass TestHeuristicFusionHead(unittest.TestCase):\n\n    def test_loss(self):\n        head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)\n        result = head.loss()\n        self.assertTrue(not head.with_loss)\n        self.assertDictEqual(result, dict())\n\n    def test_predict(self):\n        test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))\n        head = HeuristicFusionHead(\n            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)\n        mask_results = InstanceData()\n        mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])\n        mask_results.labels = torch.tensor([0, 1])\n        mask_results.scores = torch.tensor([0.8, 0.7])\n        mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],\n                                                              [0, 1]]]).bool()\n\n        seg_preds_list = [\n            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],\n                          [[0.6, 0.1], [0.1, 0.8]]])\n        ]\n        target_list = [\n            torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],\n                          [3, 1 + 2 * INSTANCE_OFFSET]])\n        ]\n        results_list = head.predict([mask_results], seg_preds_list)\n        for target, result in zip(target_list, results_list):\n            assert_allclose(result.sem_seg[0], target)\n\n        # test with no thing\n        head = HeuristicFusionHead(\n            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)\n        mask_results = InstanceData()\n        mask_results.bboxes = torch.zeros((0, 4))\n        mask_results.labels = torch.zeros((0, )).long()\n        mask_results.scores = torch.zeros((0, ))\n        mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)\n        seg_preds_list = [\n            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],\n                          [[0.6, 0.1], [0.1, 0.8]]])\n        ]\n        target_list = [torch.tensor([[4, 2], [3, 4]])]\n\n        results_list = head.predict([mask_results], seg_preds_list)\n        for target, result in zip(target_list, results_list):\n            assert_allclose(result.sem_seg[0], target)\n"
  },
  {
    "path": "tests/test_models/test_seg_heads/test_maskformer_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport torch\nfrom mmengine.config import Config\n\nfrom mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead\nfrom mmdet.structures import DetDataSample\n\n\nclass TestMaskFormerFusionHead(unittest.TestCase):\n\n    def test_loss(self):\n        head = MaskFormerFusionHead(num_things_classes=2, num_stuff_classes=2)\n        result = head.loss()\n        self.assertTrue(not head.with_loss)\n        self.assertDictEqual(result, dict())\n\n    def test_predict(self):\n        mask_cls_results = torch.rand((2, 10, 5))\n        mask_pred_results = torch.rand((2, 10, 32, 32))\n        batch_data_samples = [\n            DetDataSample(\n                metainfo={\n                    'batch_input_shape': (32, 32),\n                    'img_shape': (32, 30),\n                    'ori_shape': (30, 30)\n                }),\n            DetDataSample(\n                metainfo={\n                    'batch_input_shape': (32, 32),\n                    'img_shape': (32, 30),\n                    'ori_shape': (29, 30)\n                })\n        ]\n\n        # get panoptic and instance segmentation results\n        test_cfg = Config(\n            dict(\n                panoptic_on=True,\n                semantic_on=False,\n                instance_on=True,\n                max_per_image=10,\n                object_mask_thr=0.3,\n                iou_thr=0.3,\n                filter_low_score=False))\n        head = MaskFormerFusionHead(\n            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)\n        results = head.predict(\n            mask_cls_results,\n            mask_pred_results,\n            batch_data_samples,\n            rescale=False)\n        for i in range(len(results)):\n            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],\n                             batch_data_samples[i].img_shape)\n            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],\n                             batch_data_samples[i].img_shape)\n\n        results = head.predict(\n            mask_cls_results,\n            mask_pred_results,\n            batch_data_samples,\n            rescale=True)\n        for i in range(len(results)):\n            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],\n                             batch_data_samples[i].ori_shape)\n            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],\n                             batch_data_samples[i].ori_shape)\n\n        # get empty results\n        test_cfg = Config(\n            dict(\n                panoptic_on=False,\n                semantic_on=False,\n                instance_on=False,\n                max_per_image=10,\n                object_mask_thr=0.3,\n                iou_thr=0.3,\n                filter_low_score=False))\n        head = MaskFormerFusionHead(\n            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)\n        results = head.predict(\n            mask_cls_results,\n            mask_pred_results,\n            batch_data_samples,\n            rescale=True)\n        for i in range(len(results)):\n            self.assertEqual(results[i], dict())\n\n        # semantic segmentation is not supported\n        test_cfg = Config(\n            dict(\n                panoptic_on=False,\n                semantic_on=True,\n                instance_on=False,\n                max_per_image=10,\n                object_mask_thr=0.3,\n              
  iou_thr=0.3,\n                filter_low_score=False))\n        head = MaskFormerFusionHead(\n            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)\n        with self.assertRaises(AssertionError):\n            results = head.predict(\n                mask_cls_results,\n                mask_pred_results,\n                batch_data_samples,\n                rescale=True)\n"
  },
  {
    "path": "tests/test_models/test_seg_heads/test_panoptic_fpn_head.py",
    "content": "import unittest\n\nimport torch\nfrom mmengine.structures import PixelData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.seg_heads import PanopticFPNHead\nfrom mmdet.structures import DetDataSample\n\n\nclass TestPanopticFPNHead(unittest.TestCase):\n\n    def test_init_weights(self):\n        head = PanopticFPNHead(\n            num_things_classes=2,\n            num_stuff_classes=2,\n            in_channels=32,\n            inner_channels=32)\n        head.init_weights()\n        assert_allclose(head.conv_logits.bias.data,\n                        torch.zeros_like(head.conv_logits.bias.data))\n\n    def test_loss(self):\n        head = PanopticFPNHead(\n            num_things_classes=2,\n            num_stuff_classes=2,\n            in_channels=32,\n            inner_channels=32,\n            start_level=0,\n            end_level=1)\n        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]\n        data_sample1 = DetDataSample()\n        data_sample1.gt_sem_seg = PixelData(\n            sem_seg=torch.randint(0, 4, (1, 7, 8)))\n        data_sample2 = DetDataSample()\n        data_sample2.gt_sem_seg = PixelData(\n            sem_seg=torch.randint(0, 4, (1, 7, 8)))\n        batch_data_samples = [data_sample1, data_sample2]\n        results = head.loss(x, batch_data_samples)\n        self.assertIsInstance(results, dict)\n\n    def test_predict(self):\n        head = PanopticFPNHead(\n            num_things_classes=2,\n            num_stuff_classes=2,\n            in_channels=32,\n            inner_channels=32,\n            start_level=0,\n            end_level=1)\n        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]\n        img_meta1 = {\n            'batch_input_shape': (16, 16),\n            'img_shape': (14, 14),\n            'ori_shape': (12, 12),\n        }\n        img_meta2 = {\n            'batch_input_shape': (16, 16),\n            'img_shape': (16, 16),\n            'ori_shape': (16, 16),\n        }\n        batch_img_metas = [img_meta1, img_meta2]\n        head.eval()\n        with torch.no_grad():\n            seg_preds = head.predict(x, batch_img_metas, rescale=False)\n            self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))\n            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))\n\n            seg_preds = head.predict(x, batch_img_metas, rescale=True)\n            self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))\n            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))\n"
  },
  {
    "path": "tests/test_models/test_task_modules/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_approx_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner\n\n\nclass TestApproxIoUAssigner(TestCase):\n\n    def test_approx_iou_assigner(self):\n        assigner = ApproxMaxIoUAssigner(\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n        )\n        bboxes = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData()\n        pred_instances.priors = bboxes\n        pred_instances.approxs = bboxes[:, None, :]\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 2, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_approx_iou_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        assigner = ApproxMaxIoUAssigner(\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n        )\n        bboxes = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([])\n        gt_labels = torch.LongTensor([])\n\n        pred_instances = InstanceData()\n        pred_instances.priors = bboxes\n        pred_instances.approxs = bboxes[:, None, :]\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_approx_iou_assigner_with_empty_boxes(self):\n        \"\"\"Test corner case where an network might predict no boxes.\"\"\"\n        assigner = ApproxMaxIoUAssigner(\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n        )\n        bboxes = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData()\n        pred_instances.priors = bboxes\n        pred_instances.approxs = bboxes[:, None, :]\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        self.assertEqual(len(assign_result.gt_inds), 0)\n\n    def test_approx_iou_assigner_with_empty_boxes_and_gt(self):\n        \"\"\"Test corner case where an network might predict no boxes and no\n        gt.\"\"\"\n        assigner = ApproxMaxIoUAssigner(\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n        )\n        bboxes = torch.empty((0, 4))\n        gt_bboxes = torch.empty((0, 4))\n        gt_labels = torch.LongTensor([])\n\n        pred_instances = InstanceData()\n        pred_instances.priors = bboxes\n        pred_instances.approxs = 
bboxes[:, None, :]\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        self.assertEqual(len(assign_result.gt_inds), 0)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_atss_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import ATSSAssigner\n\n\nclass TestATSSAssigner(TestCase):\n\n    def test_atss_assigner(self):\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_bboxes = [4]\n\n        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,\n                                             gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 4)\n        self.assertEqual(len(assign_result.labels), 4)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_atss_assigner_with_ignore(self):\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [30, 32, 40, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n        gt_bboxes_ignore = torch.Tensor([\n            [30, 30, 40, 40],\n        ])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n        num_level_bboxes = [4]\n        assign_result = atss_assigner.assign(\n            pred_instances,\n            num_level_bboxes,\n            gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_atss_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.empty(0, 4)\n        gt_labels = torch.empty(0)\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_bboxes = [4]\n        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,\n                                             gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_atss_assigner_with_empty_boxes(self):\n        \"\"\"Test corner case where a network might predict no boxes.\"\"\"\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        
pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_bboxes = [0]\n        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,\n                                             gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))\n\n    def test_atss_assigner_with_empty_boxes_and_ignore(self):\n        \"\"\"Test corner case where a network might predict no boxes and\n        ignore_iof_thr is on.\"\"\"\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_bboxes_ignore = torch.Tensor([\n            [30, 30, 40, 40],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n        num_level_bboxes = [0]\n\n        assign_result = atss_assigner.assign(\n            pred_instances,\n            num_level_bboxes,\n            gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))\n\n    def test_atss_assigner_with_empty_boxes_and_gt(self):\n        \"\"\"Test corner case where a network might predict no boxes and no\n        gt.\"\"\"\n        atss_assigner = ATSSAssigner(topk=9)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.empty((0, 4))\n        gt_labels = torch.empty(0)\n        num_level_bboxes = [0]\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,\n                                             gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_center_region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import CenterRegionAssigner\n\n\nclass TestCenterRegionAssigner(TestCase):\n\n    def test_center_region_assigner(self):\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n\n        assign_result = center_region_assigner.assign(pred_instances,\n                                                      gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 4)\n        self.assertEqual(len(assign_result.labels), 4)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n        expected_shadowed_labels = torch.LongTensor([[2, 3]])\n        shadowed_labels = assign_result.get_extra_property('shadowed_labels')\n        self.assertTrue(torch.all(shadowed_labels == expected_shadowed_labels))\n\n    def test_center_region_assigner_with_ignore(self):\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [30, 32, 40, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n        gt_bboxes_ignore = torch.Tensor([\n            [30, 30, 40, 40],\n        ])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n        assign_result = center_region_assigner.assign(\n            pred_instances,\n            gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 0, -1])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_center_region_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.empty(0, 4)\n        gt_labels = torch.empty(0)\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        assign_result = center_region_assigner.assign(pred_instances,\n                                                      gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_center_region_assigner_with_empty_boxes(self):\n        \"\"\"Test corner case where a network might predict no boxes.\"\"\"\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        assign_result = center_region_assigner.assign(pred_instances,\n                                                      gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))\n\n    def test_center_region_assigner_with_empty_boxes_and_ignore(self):\n        \"\"\"Test corner case where a network might predict no boxes and\n        ignore_iof_thr is on.\"\"\"\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_bboxes_ignore = torch.Tensor([\n            [30, 30, 40, 40],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n\n        assign_result = center_region_assigner.assign(\n            pred_instances,\n            gt_instances,\n            gt_instances_ignore=gt_instances_ignore)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))\n\n    def test_center_region_assigner_with_empty_boxes_and_gt(self):\n        \"\"\"Test corner case where a network might predict no boxes and no\n        gt.\"\"\"\n        center_region_assigner = CenterRegionAssigner(\n            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)\n        priors = torch.empty((0, 4))\n        gt_bboxes = torch.empty((0, 4))\n        gt_labels = torch.empty(0)\n\n        pred_instances = InstanceData(priors=priors)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        assign_result = center_region_assigner.assign(pred_instances,\n                                                      gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_dynamic_soft_label_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.task_modules.assigners import DynamicSoftLabelAssigner\nfrom mmdet.structures.bbox import HorizontalBoxes\n\n\nclass TestDynamicSoftLabelAssigner(TestCase):\n\n    def test_assign(self):\n        assigner = DynamicSoftLabelAssigner(\n            soft_center_radius=3.0, topk=1, iou_weight=3.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43]]),\n            labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_no_valid_bboxes(self):\n        assigner = DynamicSoftLabelAssigner(\n            soft_center_radius=3.0, topk=1, iou_weight=3.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]]))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[0, 0, 1, 1]]), labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_empty_gt(self):\n        assigner = DynamicSoftLabelAssigner(\n            soft_center_radius=3.0, topk=1, iou_weight=3.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]))\n        gt_instances = InstanceData(\n            bboxes=torch.empty(0, 4), labels=torch.empty(0))\n\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_box_type_input(self):\n        assigner = DynamicSoftLabelAssigner(\n            soft_center_radius=3.0, topk=1, iou_weight=3.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))\n        gt_instances = InstanceData(\n            bboxes=HorizontalBoxes(torch.Tensor([[23, 23, 43, 43]])),\n            labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_grid_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.task_modules.assigners import GridAssigner\n\n\nclass TestGridAssigner(TestCase):\n\n    def test_assign(self):\n        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)\n        pred_instances = InstanceData(\n            priors=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),\n            responsible_flags=torch.BoolTensor([1, 1]))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43]]),\n            labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n        # invalid neg_iou_thr\n        with self.assertRaises(AssertionError):\n            assigner = GridAssigner(\n                pos_iou_thr=0.5, neg_iou_thr=[0.3, 0.1, 0.4])\n            assigner.assign(\n                pred_instances=pred_instances, gt_instances=gt_instances)\n\n        # multi-neg_iou_thr\n        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=(0.1, 0.3))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([1, -1])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n        # gt_max_assign_all=False\n        assigner = GridAssigner(\n            pos_iou_thr=0.5, neg_iou_thr=0.3, gt_max_assign_all=False)\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n        # large min_pos_iou\n        assigner = GridAssigner(\n            pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=1)\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_empty_gt(self):\n        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)\n        pred_instances = InstanceData(\n            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]),\n            responsible_flags=torch.BoolTensor([1, 1]))\n        gt_instances = InstanceData(\n            bboxes=torch.empty(0, 4), labels=torch.empty(0))\n\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_empty_priors(self):\n        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)\n        pred_instances = InstanceData(\n            priors=torch.Tensor(torch.empty(0, 4)),\n            responsible_flags=torch.empty(0))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43]]),\n            labels=torch.LongTensor([0]))\n\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_hungarian_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import HungarianAssigner\n\n\nclass TestHungarianAssigner(TestCase):\n\n    def test_init(self):\n        with self.assertRaises(AssertionError):\n            HungarianAssigner([])\n\n    def test_hungarian_match_assigner(self):\n        assigner = HungarianAssigner([\n            dict(type='ClassificationCost', weight=1.),\n            dict(type='BBoxL1Cost', weight=5.0),\n            dict(type='IoUCost', iou_mode='giou', weight=2.0)\n        ])\n\n        # test no gt bboxes\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.empty((0, 4)).float()\n        gt_instances.labels = torch.empty((0, )).long()\n        pred_instances = InstanceData()\n        pred_instances.scores = torch.rand((10, 81))\n        pred_instances.bboxes = torch.rand((10, 4))\n        img_meta = dict(img_shape=(10, 8))\n\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds == 0))\n        self.assertTrue(torch.all(assign_result.labels == -1))\n\n        # test with gt bboxes\n        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])\n        gt_instances.labels = torch.LongTensor([1, 20])\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.bboxes.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.bboxes.size(0))\n\n    def test_bbox_match_cost(self):\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])\n        gt_instances.labels = torch.LongTensor([1, 20])\n        pred_instances = InstanceData()\n        pred_instances.scores = torch.rand((10, 81))\n        pred_instances.bboxes = torch.rand((10, 4))\n        img_meta = dict(img_shape=(10, 8))\n\n        # test IoUCost\n        assigner = HungarianAssigner(\n            ConfigDict(dict(type='IoUCost', iou_mode='iou')))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.bboxes.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.bboxes.size(0))\n\n        # test BBoxL1Cost\n        assigner = HungarianAssigner(ConfigDict(dict(type='BBoxL1Cost')))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.bboxes.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.bboxes.size(0))\n\n    def test_cls_match_cost(self):\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])\n        gt_instances.labels = torch.LongTensor([1, 20])\n        pred_instances = 
InstanceData()\n        pred_instances.scores = torch.rand((10, 81))\n        pred_instances.bboxes = torch.rand((10, 4))\n        img_meta = dict(img_shape=(10, 8))\n\n        # test FocalLossCost\n        assigner = HungarianAssigner(dict(type='FocalLossCost'))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.bboxes.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.bboxes.size(0))\n\n        # test ClassificationCost\n        assigner = HungarianAssigner(dict(type='ClassificationCost'))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.bboxes.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.bboxes.size(0))\n\n    def test_mask_match_cost(self):\n        gt_instances = InstanceData()\n        gt_instances.masks = torch.randint(0, 2, (2, 10, 10)).long()\n        gt_instances.labels = torch.LongTensor([1, 20])\n\n        pred_instances = InstanceData()\n        pred_instances.masks = torch.rand((4, 10, 10))\n        pred_instances.scores = torch.rand((4, 25))\n        img_meta = dict(img_shape=(10, 10))\n\n        # test DiceCost\n        assigner = HungarianAssigner(dict(type='DiceCost'))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.masks.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.masks.size(0))\n\n        # test CrossEntropyLossCost\n        assigner = HungarianAssigner(dict(type='CrossEntropyLossCost'))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.masks.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.masks.size(0))\n\n        # test FocalLossCost\n        assigner = HungarianAssigner(\n            dict(type='FocalLossCost', binary_input=True))\n        assign_result = assigner.assign(\n            pred_instances, gt_instances, img_meta=img_meta)\n        self.assertTrue(torch.all(assign_result.gt_inds > -1))\n        self.assertEqual((assign_result.gt_inds > 0).sum(),\n                         gt_instances.masks.size(0))\n        self.assertEqual((assign_result.labels > -1).sum(),\n                         gt_instances.masks.size(0))\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Tests the Assigner objects.\n\nCommandLine:\n    pytest  tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py\n    xdoctest  tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py zero\n\"\"\" # noqa\nimport pytest\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import MaxIoUAssigner\n\n\n@pytest.mark.parametrize('neg_iou_thr', [0.5, (0, 0.5)])\ndef test_max_iou_assigner(neg_iou_thr):\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=neg_iou_thr,\n    )\n    priors = torch.FloatTensor([\n        [0, 0, 10, 10],\n        [10, 10, 20, 20],\n        [5, 5, 15, 15],\n        [32, 32, 38, 42],\n    ])\n    gt_bboxes = torch.FloatTensor([\n        [0, 0, 10, 9],\n        [0, 10, 10, 19],\n    ])\n    gt_labels = torch.LongTensor([2, 3])\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n\n    assign_result = self.assign(pred_instances, gt_instances)\n    assert len(assign_result.gt_inds) == 4\n    assert len(assign_result.labels) == 4\n\n    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])\n    assert torch.all(assign_result.gt_inds == expected_gt_inds)\n\n\ndef test_max_iou_assigner_with_ignore():\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n        ignore_iof_thr=0.5,\n        ignore_wrt_candidates=False,\n    )\n    priors = torch.FloatTensor([\n        [0, 0, 10, 10],\n        [10, 10, 20, 20],\n        [5, 5, 15, 15],\n        [30, 32, 40, 42],\n    ])\n    gt_bboxes = torch.FloatTensor([\n        [0, 0, 10, 9],\n        [0, 10, 10, 19],\n    ])\n    gt_labels = torch.LongTensor([2, 3])\n    gt_bboxes_ignore = torch.Tensor([\n        [30, 30, 40, 40],\n    ])\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n    gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n    assign_result = self.assign(\n        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)\n\n    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])\n    assert torch.all(assign_result.gt_inds == expected_gt_inds)\n\n\ndef test_max_iou_assigner_with_empty_gt():\n    \"\"\"Test corner case where an image might have no true detections.\"\"\"\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n    )\n    priors = torch.FloatTensor([\n        [0, 0, 10, 10],\n        [10, 10, 20, 20],\n        [5, 5, 15, 15],\n        [32, 32, 38, 42],\n    ])\n    gt_bboxes = torch.empty(0, 4)\n    gt_labels = torch.empty(0)\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n    assign_result = self.assign(pred_instances, gt_instances)\n\n    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n    assert torch.all(assign_result.gt_inds == expected_gt_inds)\n\n\ndef test_max_iou_assigner_with_empty_priors():\n    \"\"\"Test corner case where a network might predict no boxes.\"\"\"\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n    )\n    priors = torch.empty((0, 4))\n    gt_bboxes = torch.FloatTensor([\n        [0, 0, 10, 9],\n        [0, 10, 10, 19],\n    ])\n    gt_labels = torch.LongTensor([2, 3])\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n    # Test with 
gt_labels\n    assign_result = self.assign(pred_instances, gt_instances)\n    assert len(assign_result.gt_inds) == 0\n    assert tuple(assign_result.labels.shape) == (0, )\n\n\ndef test_max_iou_assigner_with_empty_boxes_and_ignore():\n    \"\"\"Test corner case where a network might predict no boxes and\n    ignore_iof_thr is on.\"\"\"\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n        ignore_iof_thr=0.5,\n    )\n    priors = torch.empty((0, 4))\n    gt_bboxes = torch.FloatTensor([\n        [0, 0, 10, 9],\n        [0, 10, 10, 19],\n    ])\n    gt_bboxes_ignore = torch.Tensor([\n        [30, 30, 40, 40],\n    ])\n    gt_labels = torch.LongTensor([2, 3])\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n    gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n\n    # Test with gt_labels\n    assign_result = self.assign(\n        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)\n    assert len(assign_result.gt_inds) == 0\n    assert tuple(assign_result.labels.shape) == (0, )\n\n\ndef test_max_iou_assigner_with_empty_priors_and_gt():\n    \"\"\"Test corner case where a network might predict no boxes and no gt.\"\"\"\n    self = MaxIoUAssigner(\n        pos_iou_thr=0.5,\n        neg_iou_thr=0.5,\n    )\n    priors = torch.empty(0, 4)\n    gt_bboxes = torch.empty(0, 4)\n    gt_labels = torch.empty(0)\n\n    pred_instances = InstanceData(priors=priors)\n    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n    assign_result = self.assign(pred_instances, gt_instances)\n    assert len(assign_result.gt_inds) == 0\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_point_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport unittest\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.task_modules.assigners import PointAssigner\n\n\nclass TestPointAssigner(unittest.TestCase):\n\n    def test_point_assigner(self):\n        assigner = PointAssigner()\n        pred_instances = InstanceData()\n        pred_instances.priors = torch.FloatTensor([\n            # [x, y, stride]\n            [0, 0, 1],\n            [10, 10, 1],\n            [5, 5, 1],\n            [32, 32, 1],\n        ])\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_instances.labels = torch.LongTensor([0, 1])\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        expected_gt_inds = torch.LongTensor([1, 2, 1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_point_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        assigner = PointAssigner()\n        pred_instances = InstanceData()\n        pred_instances.priors = torch.FloatTensor([\n            # [x, y, stride]\n            [0, 0, 1],\n            [10, 10, 1],\n            [5, 5, 1],\n            [32, 32, 1],\n        ])\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.FloatTensor([])\n        gt_instances.labels = torch.LongTensor([])\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_point_assigner_with_empty_boxes_and_gt(self):\n        \"\"\"Test corner case where an image might predict no points and no\n        gt.\"\"\"\n        assigner = PointAssigner()\n        pred_instances = InstanceData()\n        pred_instances.priors = torch.FloatTensor([])\n        gt_instances = InstanceData()\n        gt_instances.bboxes = torch.FloatTensor([])\n        gt_instances.labels = torch.LongTensor([])\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.config import ConfigDict\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import RegionAssigner\n\n\nclass TestRegionAssigner(TestCase):\n\n    def setUp(self):\n        self.img_meta = ConfigDict(dict(img_shape=(256, 256)))\n        self.featmap_sizes = [(64, 64)]\n        self.anchor_scale = 10\n        self.anchor_strides = [1]\n\n    def test_region_assigner(self):\n        region_assigner = RegionAssigner(center_ratio=0.5, ignore_ratio=0.8)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        valid_flags = torch.BoolTensor([1, 1, 1, 1])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_anchors = [4]\n\n        assign_result = region_assigner.assign(\n            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,\n            num_level_anchors, self.anchor_scale, self.anchor_strides)\n        self.assertEqual(len(assign_result.gt_inds), 4)\n        self.assertEqual(len(assign_result.labels), 4)\n\n        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_region_assigner_with_ignore(self):\n        region_assigner = RegionAssigner(center_ratio=0.5)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [30, 32, 40, 42],\n        ])\n        valid_flags = torch.BoolTensor([1, 1, 1, 1])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n        gt_bboxes_ignore = torch.Tensor([\n            [30, 30, 40, 40],\n        ])\n\n        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)\n        num_level_anchors = [4]\n        with self.assertRaises(NotImplementedError):\n            region_assigner.assign(\n                pred_instances,\n                gt_instances,\n                self.img_meta,\n                self.featmap_sizes,\n                num_level_anchors,\n                self.anchor_scale,\n                self.anchor_strides,\n                gt_instances_ignore=gt_instances_ignore)\n\n    def test_region_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        region_assigner = RegionAssigner(center_ratio=0.5)\n        priors = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        valid_flags = torch.BoolTensor([1, 1, 1, 1])\n        gt_bboxes = torch.empty(0, 4)\n        gt_labels = torch.empty(0)\n\n        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_anchors = [4]\n      
  assign_result = region_assigner.assign(\n            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,\n            num_level_anchors, self.anchor_scale, self.anchor_strides)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n\n    def test_region_assigner_with_empty_boxes(self):\n        \"\"\"Test corner case where a network might predict no boxes.\"\"\"\n        region_assigner = RegionAssigner(center_ratio=0.5)\n        priors = torch.empty((0, 4))\n        valid_flags = torch.BoolTensor([])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n\n        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        num_level_anchors = [0]\n        assign_result = region_assigner.assign(\n            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,\n            num_level_anchors, self.anchor_scale, self.anchor_strides)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))\n\n    def test_region_assigner_with_empty_boxes_and_gt(self):\n        \"\"\"Test corner case where a network might predict no boxes and no\n        gt.\"\"\"\n        region_assigner = RegionAssigner(center_ratio=0.5)\n        priors = torch.empty((0, 4))\n        valid_flags = torch.BoolTensor([])\n        gt_bboxes = torch.empty((0, 4))\n        gt_labels = torch.empty(0)\n        num_level_anchors = [0]\n\n        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)\n        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)\n        assign_result = region_assigner.assign(\n            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,\n            num_level_anchors, self.anchor_scale, self.anchor_strides)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_simota_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.task_modules.assigners import SimOTAAssigner\n\n\nclass TestSimOTAAssigner(TestCase):\n\n    def test_assign(self):\n        assigner = SimOTAAssigner(\n            center_radius=2.5,\n            candidate_topk=1,\n            iou_weight=3.0,\n            cls_weight=1.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[23, 23, 43, 43]]),\n            labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n\n        expected_gt_inds = torch.LongTensor([1, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_no_valid_bboxes(self):\n        assigner = SimOTAAssigner(\n            center_radius=2.5,\n            candidate_topk=1,\n            iou_weight=3.0,\n            cls_weight=1.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]]))\n        gt_instances = InstanceData(\n            bboxes=torch.Tensor([[0, 0, 1, 1]]), labels=torch.LongTensor([0]))\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_assign_with_empty_gt(self):\n        assigner = SimOTAAssigner(\n            center_radius=2.5,\n            candidate_topk=1,\n            iou_weight=3.0,\n            cls_weight=1.0)\n        pred_instances = InstanceData(\n            bboxes=torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]]),\n            scores=torch.FloatTensor([[0.2], [0.8]]),\n            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]))\n        gt_instances = InstanceData(\n            bboxes=torch.empty(0, 4), labels=torch.empty(0))\n\n        assign_result = assigner.assign(\n            pred_instances=pred_instances, gt_instances=gt_instances)\n        expected_gt_inds = torch.LongTensor([0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_task_aligned_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.task_modules.assigners import TaskAlignedAssigner\n\n\nclass TestTaskAlignedAssigner(TestCase):\n\n    def test_task_aligned_assigner(self):\n\n        with self.assertRaises(AssertionError):\n            TaskAlignedAssigner(topk=0)\n\n        assigner = TaskAlignedAssigner(topk=13)\n        pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],\n                                        [0.4, 0.5]])\n        pred_bbox = torch.FloatTensor([\n            [1, 1, 12, 8],\n            [4, 4, 20, 20],\n            [1, 5, 15, 15],\n            [30, 5, 32, 42],\n        ])\n        anchor = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([0, 1])\n        pred_instances = InstanceData()\n        pred_instances.priors = anchor\n        pred_instances.bboxes = pred_bbox\n        pred_instances.scores = pred_score\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 4)\n        self.assertEqual(len(assign_result.labels), 4)\n\n        # test empty gt\n        gt_bboxes = torch.empty(0, 4)\n        gt_labels = torch.empty(0, 2).long()\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_assigners/test_task_uniform_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine.structures import InstanceData\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.models.task_modules.assigners import UniformAssigner\n\n\nclass TestUniformAssigner(TestCase):\n\n    def test_uniform_assigner(self):\n        assigner = UniformAssigner(0.15, 0.7, 1)\n        pred_bbox = torch.FloatTensor([\n            [1, 1, 12, 8],\n            [4, 4, 20, 20],\n            [1, 5, 15, 15],\n            [30, 5, 32, 42],\n        ])\n        anchor = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n        pred_instances = InstanceData()\n        pred_instances.priors = anchor\n        pred_instances.decoder_priors = pred_bbox\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 4)\n        self.assertEqual(len(assign_result.labels), 4)\n\n        expected_gt_inds = torch.LongTensor([-1, 0, 2, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_uniform_assigner_with_empty_gt(self):\n        \"\"\"Test corner case where an image might have no true detections.\"\"\"\n        assigner = UniformAssigner(0.15, 0.7, 1)\n        pred_bbox = torch.FloatTensor([\n            [1, 1, 12, 8],\n            [4, 4, 20, 20],\n            [1, 5, 15, 15],\n            [30, 5, 32, 42],\n        ])\n        anchor = torch.FloatTensor([\n            [0, 0, 10, 10],\n            [10, 10, 20, 20],\n            [5, 5, 15, 15],\n            [32, 32, 38, 42],\n        ])\n        gt_bboxes = torch.empty(0, 4)\n        gt_labels = torch.empty(0)\n        pred_instances = InstanceData()\n        pred_instances.priors = anchor\n        pred_instances.decoder_priors = pred_bbox\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n\n        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])\n        assert_allclose(assign_result.gt_inds, expected_gt_inds)\n\n    def test_uniform_assigner_with_empty_boxes(self):\n        \"\"\"Test corner case where a network might predict no boxes.\"\"\"\n        assigner = UniformAssigner(0.15, 0.7, 1)\n        pred_bbox = torch.empty((0, 4))\n        anchor = torch.empty((0, 4))\n        gt_bboxes = torch.FloatTensor([\n            [0, 0, 10, 9],\n            [0, 10, 10, 19],\n        ])\n        gt_labels = torch.LongTensor([2, 3])\n        pred_instances = InstanceData()\n        pred_instances.priors = anchor\n        pred_instances.decoder_priors = pred_bbox\n        gt_instances = InstanceData()\n        gt_instances.bboxes = gt_bboxes\n        gt_instances.labels = gt_labels\n\n        # Test with gt_labels\n        assign_result = assigner.assign(pred_instances, gt_instances)\n        self.assertEqual(len(assign_result.gt_inds), 0)\n        self.assertEqual(tuple(assign_result.labels.shape), (0, ))\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_coder/test_delta_xywh_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder\n\n\ndef test_delta_bbox_coder():\n    coder = DeltaXYWHBBoxCoder()\n\n    rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],\n                         [5., 5., 5., 5.]])\n    deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],\n                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])\n    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                                           [0.1409, 0.1409, 2.8591, 2.8591],\n                                           [0.0000, 0.3161, 4.1945, 0.6839],\n                                           [5.0000, 5.0000, 5.0000, 5.0000]])\n\n    out = coder.decode(rois, deltas, max_shape=(32, 32))\n    assert expected_decode_bboxes.allclose(out, atol=1e-04)\n    out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))\n    assert expected_decode_bboxes.allclose(out, atol=1e-04)\n\n    batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)\n    batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)\n    batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]\n    assert out.allclose(batch_out)\n    batch_out = coder.decode(\n        batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]\n    assert out.allclose(batch_out)\n\n    # test max_shape is not equal to batch\n    with pytest.raises(AssertionError):\n        coder.decode(\n            batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])\n\n    rois = torch.zeros((0, 4))\n    deltas = torch.zeros((0, 4))\n    out = coder.decode(rois, deltas, max_shape=(32, 32))\n    assert rois.shape == out.shape\n\n    # test add_ctr_clamp\n    coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)\n\n    rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],\n                         [5., 5., 5., 5.]])\n    deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],\n                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])\n    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],\n                                           [0.1409, 0.1409, 2.8591, 2.8591],\n                                           [0.0000, 0.3161, 4.1945, 0.6839],\n                                           [5.0000, 5.0000, 5.0000, 5.0000]])\n\n    out = coder.decode(rois, deltas, max_shape=(32, 32))\n    assert expected_decode_bboxes.allclose(out, atol=1e-04)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_iou2d_calculator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmdet.evaluation import bbox_overlaps as recall_overlaps\nfrom mmdet.models.task_modules import BboxOverlaps2D\nfrom mmdet.structures.bbox import bbox_overlaps\n\n\ndef test_bbox_overlaps_2d(eps=1e-7):\n\n    def _construct_bbox(num_bbox=None):\n        img_h = int(np.random.randint(3, 1000))\n        img_w = int(np.random.randint(3, 1000))\n        if num_bbox is None:\n            num_bbox = np.random.randint(1, 10)\n        x1y1 = torch.rand((num_bbox, 2))\n        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)\n        bboxes = torch.cat((x1y1, x2y2), -1)\n        bboxes[:, 0::2] *= img_w\n        bboxes[:, 1::2] *= img_h\n        return bboxes, num_bbox\n\n    # is_aligned is True, bboxes.size(-1) == 5 (include score)\n    self = BboxOverlaps2D()\n    bboxes1, num_bbox = _construct_bbox()\n    bboxes2, _ = _construct_bbox(num_bbox)\n    bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)\n    bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)\n    gious = self(bboxes1, bboxes2, 'giou', True)\n    assert gious.size() == (num_bbox, ), gious.size()\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n\n    # is_aligned is True, bboxes1.size(-2) == 0\n    bboxes1 = torch.empty((0, 4))\n    bboxes2 = torch.empty((0, 4))\n    gious = self(bboxes1, bboxes2, 'giou', True)\n    assert gious.size() == (0, ), gious.size()\n    assert torch.all(gious == torch.empty((0, )))\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n\n    # is_aligned is True, and bboxes.ndims > 2\n    bboxes1, num_bbox = _construct_bbox()\n    bboxes2, _ = _construct_bbox(num_bbox)\n    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)\n    # test assertion when batch dim is not the same\n    with pytest.raises(AssertionError):\n        self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)\n    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)\n    gious = self(bboxes1, bboxes2, 'giou', True)\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n    assert gious.size() == (2, num_bbox)\n    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)\n    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)\n    gious = self(bboxes1, bboxes2, 'giou', True)\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n    assert gious.size() == (2, 2, num_bbox)\n\n    # is_aligned is False\n    bboxes1, num_bbox1 = _construct_bbox()\n    bboxes2, num_bbox2 = _construct_bbox()\n    gious = self(bboxes1, bboxes2, 'giou')\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n    assert gious.size() == (num_bbox1, num_bbox2)\n\n    # is_aligned is False, and bboxes.ndims > 2\n    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)\n    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)\n    gious = self(bboxes1, bboxes2, 'giou')\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n    assert gious.size() == (2, num_bbox1, num_bbox2)\n    bboxes1 = bboxes1.unsqueeze(0)\n    bboxes2 = bboxes2.unsqueeze(0)\n    gious = self(bboxes1, bboxes2, 'giou')\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n    assert gious.size() == (1, 2, num_bbox1, num_bbox2)\n\n    # is_aligned is False, bboxes1.size(-2) == 0\n    gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')\n    assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))\n    assert torch.all(gious >= -1) and torch.all(gious <= 1)\n\n    # test allclose between bbox_overlaps and the 
original official\n    # implementation.\n    bboxes1 = torch.FloatTensor([\n        [0, 0, 10, 10],\n        [10, 10, 20, 20],\n        [32, 32, 38, 42],\n    ])\n    bboxes2 = torch.FloatTensor([\n        [0, 0, 10, 20],\n        [0, 10, 10, 19],\n        [10, 10, 20, 20],\n    ])\n    gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)\n    gious = gious.numpy().round(4)\n    # the ground-truth values are rounded to four decimal places.\n    expected_gious = np.array([0.5000, -0.0500, -0.8214])\n    assert np.allclose(gious, expected_gious, rtol=0, atol=eps)\n\n    # test mode 'iof'\n    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)\n    assert torch.all(ious >= -1) and torch.all(ious <= 1)\n    assert ious.size() == (bboxes1.size(0), )\n    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)\n    assert torch.all(ious >= -1) and torch.all(ious <= 1)\n    assert ious.size() == (bboxes1.size(0), bboxes2.size(0))\n\n\ndef test_voc_recall_overlaps():\n\n    def _construct_bbox(num_bbox=None):\n        img_h = int(np.random.randint(3, 1000))\n        img_w = int(np.random.randint(3, 1000))\n        if num_bbox is None:\n            num_bbox = np.random.randint(1, 10)\n        x1y1 = torch.rand((num_bbox, 2))\n        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)\n        bboxes = torch.cat((x1y1, x2y2), -1)\n        bboxes[:, 0::2] *= img_w\n        bboxes[:, 1::2] *= img_h\n        return bboxes.numpy(), num_bbox\n\n    bboxes1, num_bbox = _construct_bbox()\n    bboxes2, _ = _construct_bbox(num_bbox)\n    ious = recall_overlaps(\n        bboxes1, bboxes2, 'iou', use_legacy_coordinate=False)\n    assert ious.shape == (num_bbox, num_bbox)\n    assert np.all(ious >= -1) and np.all(ious <= 1)\n\n    ious = recall_overlaps(bboxes1, bboxes2, 'iou', use_legacy_coordinate=True)\n    assert ious.shape == (num_bbox, num_bbox)\n    assert np.all(ious >= -1) and np.all(ious <= 1)\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_prior_generators/test_anchor_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"\nCommandLine:\n    pytest tests/test_utils/test_anchor.py\n    xdoctest tests/test_utils/test_anchor.py zero\n\n\"\"\"\nimport pytest\nimport torch\n\n\ndef test_standard_points_generator():\n    from mmdet.models.task_modules import build_prior_generator\n\n    # teat init\n    anchor_generator_cfg = dict(\n        type='MlvlPointGenerator', strides=[4, 8], offset=0)\n    anchor_generator = build_prior_generator(anchor_generator_cfg)\n    assert anchor_generator is not None\n    assert anchor_generator.num_base_priors == [1, 1]\n    # test_stride\n    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator\n\n    # Square strides\n    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)\n    mlvl_points_half_stride_generator = MlvlPointGenerator(\n        strides=[4, 10], offset=0.5)\n    assert mlvl_points.num_levels == 2\n\n    # assert self.num_levels == len(featmap_sizes)\n    with pytest.raises(AssertionError):\n        mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cpu')\n    priors = mlvl_points.grid_priors(\n        featmap_sizes=[(2, 2), (4, 8)], device='cpu')\n    priors_with_stride = mlvl_points.grid_priors(\n        featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cpu')\n    assert len(priors) == 2\n\n    # assert last dimension is (coord_x, coord_y, stride_w, stride_h).\n    assert priors_with_stride[0].size(1) == 4\n    assert priors_with_stride[0][0][2] == 4\n    assert priors_with_stride[0][0][3] == 4\n    assert priors_with_stride[1][0][2] == 10\n    assert priors_with_stride[1][0][3] == 10\n\n    stride_4_feat_2_2 = priors[0]\n    assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4\n    assert stride_4_feat_2_2.size(0) == 4\n    assert stride_4_feat_2_2.size(1) == 2\n\n    stride_10_feat_4_8 = priors[1]\n    assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10\n    assert stride_10_feat_4_8.size(0) == 4 * 8\n    assert stride_10_feat_4_8.size(1) == 2\n\n    # assert the offset of 0.5 * stride\n    priors_half_offset = mlvl_points_half_stride_generator.grid_priors(\n        featmap_sizes=[(2, 2), (4, 8)], device='cpu')\n\n    assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2\n    assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2\n    if torch.cuda.is_available():\n        anchor_generator_cfg = dict(\n            type='MlvlPointGenerator', strides=[4, 8], offset=0)\n        anchor_generator = build_prior_generator(anchor_generator_cfg)\n        assert anchor_generator is not None\n        # Square strides\n        mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)\n        mlvl_points_half_stride_generator = MlvlPointGenerator(\n            strides=[4, 10], offset=0.5)\n        assert mlvl_points.num_levels == 2\n\n        # assert self.num_levels == len(featmap_sizes)\n        with pytest.raises(AssertionError):\n            mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cuda')\n        priors = mlvl_points.grid_priors(\n            featmap_sizes=[(2, 2), (4, 8)], device='cuda')\n        priors_with_stride = mlvl_points.grid_priors(\n            featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cuda')\n        assert len(priors) == 2\n\n        # assert last dimension is (coord_x, coord_y, stride_w, stride_h).\n        assert priors_with_stride[0].size(1) == 4\n        assert priors_with_stride[0][0][2] == 4\n        assert priors_with_stride[0][0][3] == 4\n        
assert priors_with_stride[1][0][2] == 10\n        assert priors_with_stride[1][0][3] == 10\n\n        stride_4_feat_2_2 = priors[0]\n        assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4\n        assert stride_4_feat_2_2.size(0) == 4\n        assert stride_4_feat_2_2.size(1) == 2\n\n        stride_10_feat_4_8 = priors[1]\n        assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10\n        assert stride_10_feat_4_8.size(0) == 4 * 8\n        assert stride_10_feat_4_8.size(1) == 2\n\n        # assert the offset of 0.5 * stride\n        priors_half_offset = mlvl_points_half_stride_generator.grid_priors(\n            featmap_sizes=[(2, 2), (4, 8)], device='cuda')\n\n        assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2\n        assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2\n\n\ndef test_sparse_prior():\n    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator\n    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)\n    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()\n\n    featmap_sizes = [(3, 5), (6, 4)]\n    grid_anchors = mlvl_points.grid_priors(\n        featmap_sizes=featmap_sizes, with_stride=False, device='cpu')\n    sparse_prior = mlvl_points.sparse_priors(\n        prior_idxs=prior_indexs,\n        featmap_size=featmap_sizes[0],\n        level_idx=0,\n        device='cpu')\n\n    assert not sparse_prior.is_cuda\n    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()\n    sparse_prior = mlvl_points.sparse_priors(\n        prior_idxs=prior_indexs,\n        featmap_size=featmap_sizes[1],\n        level_idx=1,\n        device='cpu')\n    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()\n\n    from mmdet.models.task_modules.prior_generators import AnchorGenerator\n    mlvl_anchors = AnchorGenerator(\n        strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])\n    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()\n\n    featmap_sizes = [(3, 5), (6, 4)]\n    grid_anchors = mlvl_anchors.grid_priors(\n        featmap_sizes=featmap_sizes, device='cpu')\n    sparse_prior = mlvl_anchors.sparse_priors(\n        prior_idxs=prior_indexs,\n        featmap_size=featmap_sizes[0],\n        level_idx=0,\n        device='cpu')\n    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()\n    sparse_prior = mlvl_anchors.sparse_priors(\n        prior_idxs=prior_indexs,\n        featmap_size=featmap_sizes[1],\n        level_idx=1,\n        device='cpu')\n    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()\n\n    # for ssd\n    from mmdet.models.task_modules.prior_generators import SSDAnchorGenerator\n    featmap_sizes = [(38, 38), (19, 19), (10, 10)]\n    anchor_generator = SSDAnchorGenerator(\n        scale_major=False,\n        input_size=300,\n        basesize_ratio_range=(0.15, 0.9),\n        strides=[8, 16, 32],\n        ratios=[[2], [2, 3], [2, 3]])\n    ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')\n    for i in range(len(featmap_sizes)):\n        sparse_ssd_anchors = anchor_generator.sparse_priors(\n            prior_idxs=prior_indexs,\n            level_idx=i,\n            featmap_size=featmap_sizes[i],\n            device='cpu')\n        assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()\n\n    # for yolo\n    from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator\n    featmap_sizes = [(38, 38), (19, 19), (10, 10)]\n    anchor_generator = YOLOAnchorGenerator(\n     
   strides=[32, 16, 8],\n        base_sizes=[\n            [(116, 90), (156, 198), (373, 326)],\n            [(30, 61), (62, 45), (59, 119)],\n            [(10, 13), (16, 30), (33, 23)],\n        ])\n    yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')\n    for i in range(len(featmap_sizes)):\n        sparse_yolo_anchors = anchor_generator.sparse_priors(\n            prior_idxs=prior_indexs,\n            level_idx=i,\n            featmap_size=featmap_sizes[i],\n            device='cpu')\n        assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()\n\n    if torch.cuda.is_available():\n        mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)\n        prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,\n                                     9]).long().cuda()\n\n        featmap_sizes = [(6, 8), (6, 4)]\n        grid_anchors = mlvl_points.grid_priors(\n            featmap_sizes=featmap_sizes, with_stride=False, device='cuda')\n        sparse_prior = mlvl_points.sparse_priors(\n            prior_idxs=prior_indexs,\n            featmap_size=featmap_sizes[0],\n            level_idx=0,\n            device='cuda')\n        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()\n        sparse_prior = mlvl_points.sparse_priors(\n            prior_idxs=prior_indexs,\n            featmap_size=featmap_sizes[1],\n            level_idx=1,\n            device='cuda')\n        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()\n        assert sparse_prior.is_cuda\n        mlvl_anchors = AnchorGenerator(\n            strides=[16, 32],\n            ratios=[1., 2.5],\n            scales=[1., 5.],\n            base_sizes=[4, 8])\n        prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,\n                                     9]).long().cuda()\n\n        featmap_sizes = [(13, 5), (16, 4)]\n        grid_anchors = mlvl_anchors.grid_priors(\n            featmap_sizes=featmap_sizes, device='cuda')\n        sparse_prior = mlvl_anchors.sparse_priors(\n            prior_idxs=prior_indexs,\n            featmap_size=featmap_sizes[0],\n            level_idx=0,\n            device='cuda')\n        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()\n        sparse_prior = mlvl_anchors.sparse_priors(\n            prior_idxs=prior_indexs,\n            featmap_size=featmap_sizes[1],\n            level_idx=1,\n            device='cuda')\n        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()\n\n        # for ssd\n        from mmdet.models.task_modules.prior_generators import \\\n            SSDAnchorGenerator\n        featmap_sizes = [(38, 38), (19, 19), (10, 10)]\n        anchor_generator = SSDAnchorGenerator(\n            scale_major=False,\n            input_size=300,\n            basesize_ratio_range=(0.15, 0.9),\n            strides=[8, 16, 32],\n            ratios=[[2], [2, 3], [2, 3]])\n        ssd_anchors = anchor_generator.grid_anchors(\n            featmap_sizes, device='cuda')\n        for i in range(len(featmap_sizes)):\n            sparse_ssd_anchors = anchor_generator.sparse_priors(\n                prior_idxs=prior_indexs,\n                level_idx=i,\n                featmap_size=featmap_sizes[i],\n                device='cuda')\n            assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()\n\n        # for yolo\n        from mmdet.models.task_modules.prior_generators import \\\n            YOLOAnchorGenerator\n        featmap_sizes = [(38, 38), (19, 19), (10, 10)]\n        
anchor_generator = YOLOAnchorGenerator(\n            strides=[32, 16, 8],\n            base_sizes=[\n                [(116, 90), (156, 198), (373, 326)],\n                [(30, 61), (62, 45), (59, 119)],\n                [(10, 13), (16, 30), (33, 23)],\n            ])\n        yolo_anchors = anchor_generator.grid_anchors(\n            featmap_sizes, device='cuda')\n        for i in range(len(featmap_sizes)):\n            sparse_yolo_anchors = anchor_generator.sparse_priors(\n                prior_idxs=prior_indexs,\n                level_idx=i,\n                featmap_size=featmap_sizes[i],\n                device='cuda')\n            assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()\n\n\ndef test_standard_anchor_generator():\n    from mmdet.models.task_modules import build_anchor_generator\n    anchor_generator_cfg = dict(\n        type='AnchorGenerator',\n        scales=[8],\n        ratios=[0.5, 1.0, 2.0],\n        strides=[4, 8])\n\n    anchor_generator = build_anchor_generator(anchor_generator_cfg)\n    assert anchor_generator.num_base_priors == \\\n           anchor_generator.num_base_anchors\n    assert anchor_generator.num_base_priors == [3, 3]\n    assert anchor_generator is not None\n\n\ndef test_strides():\n    from mmdet.models.task_modules.prior_generators import AnchorGenerator\n\n    # Square strides\n    self = AnchorGenerator([10], [1.], [1.], [10])\n    anchors = self.grid_anchors([(2, 2)], device='cpu')\n\n    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],\n                                     [-5., 5., 5., 15.], [5., 5., 15., 15.]])\n\n    assert torch.equal(anchors[0], expected_anchors)\n\n    # Different strides in x and y direction\n    self = AnchorGenerator([(10, 20)], [1.], [1.], [10])\n    anchors = self.grid_anchors([(2, 2)], device='cpu')\n\n    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],\n                                     [-5., 15., 5., 25.], [5., 15., 15., 25.]])\n\n    assert torch.equal(anchors[0], expected_anchors)\n\n\ndef test_ssd_anchor_generator():\n    from mmdet.models.task_modules import build_anchor_generator\n    if torch.cuda.is_available():\n        device = 'cuda'\n    else:\n        device = 'cpu'\n\n    # min_sizes max_sizes must set at the same time\n    with pytest.raises(AssertionError):\n        anchor_generator_cfg = dict(\n            type='SSDAnchorGenerator',\n            scale_major=False,\n            min_sizes=[48, 100, 150, 202, 253, 300],\n            max_sizes=None,\n            strides=[8, 16, 32, 64, 100, 300],\n            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])\n        build_anchor_generator(anchor_generator_cfg)\n\n    # length of min_sizes max_sizes must be the same\n    with pytest.raises(AssertionError):\n        anchor_generator_cfg = dict(\n            type='SSDAnchorGenerator',\n            scale_major=False,\n            min_sizes=[48, 100, 150, 202, 253, 300],\n            max_sizes=[100, 150, 202, 253],\n            strides=[8, 16, 32, 64, 100, 300],\n            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])\n        build_anchor_generator(anchor_generator_cfg)\n\n    # test setting anchor size manually\n    anchor_generator_cfg = dict(\n        type='SSDAnchorGenerator',\n        scale_major=False,\n        min_sizes=[48, 100, 150, 202, 253, 304],\n        max_sizes=[100, 150, 202, 253, 304, 320],\n        strides=[16, 32, 64, 107, 160, 320],\n        ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])\n\n    featmap_sizes 
= [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]\n    anchor_generator = build_anchor_generator(anchor_generator_cfg)\n\n    expected_base_anchors = [\n        torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000],\n                      [-26.6410, -26.6410, 42.6410, 42.6410],\n                      [-25.9411, -8.9706, 41.9411, 24.9706],\n                      [-8.9706, -25.9411, 24.9706, 41.9411],\n                      [-33.5692, -5.8564, 49.5692, 21.8564],\n                      [-5.8564, -33.5692, 21.8564, 49.5692]]),\n        torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000],\n                      [-45.2372, -45.2372, 77.2372, 77.2372],\n                      [-54.7107, -19.3553, 86.7107, 51.3553],\n                      [-19.3553, -54.7107, 51.3553, 86.7107],\n                      [-70.6025, -12.8675, 102.6025, 44.8675],\n                      [-12.8675, -70.6025, 44.8675, 102.6025]]),\n        torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000],\n                      [-55.0345, -55.0345, 119.0345, 119.0345],\n                      [-74.0660, -21.0330, 138.0660, 85.0330],\n                      [-21.0330, -74.0660, 85.0330, 138.0660],\n                      [-97.9038, -11.3013, 161.9038, 75.3013],\n                      [-11.3013, -97.9038, 75.3013, 161.9038]]),\n        torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000],\n                      [-59.5332, -59.5332, 166.5332, 166.5332],\n                      [-89.3356, -17.9178, 196.3356, 124.9178],\n                      [-17.9178, -89.3356, 124.9178, 196.3356],\n                      [-121.4371, -4.8124, 228.4371, 111.8124],\n                      [-4.8124, -121.4371, 111.8124, 228.4371]]),\n        torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000],\n                      [-58.6651, -58.6651, 218.6651, 218.6651],\n                      [-98.8980, -9.4490, 258.8980, 169.4490],\n                      [-9.4490, -98.8980, 169.4490, 258.8980],\n                      [-139.1044, 6.9652, 299.1044, 153.0348],\n                      [6.9652, -139.1044, 153.0348, 299.1044]]),\n        torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000],\n                      [4.0513, 4.0513, 315.9487, 315.9487],\n                      [-54.9605, 52.5198, 374.9604, 267.4802],\n                      [52.5198, -54.9605, 267.4802, 374.9604],\n                      [-103.2717, 72.2428, 423.2717, 247.7572],\n                      [72.2428, -103.2717, 247.7572, 423.2717]])\n    ]\n\n    base_anchors = anchor_generator.base_anchors\n    for i, base_anchor in enumerate(base_anchors):\n        assert base_anchor.allclose(expected_base_anchors[i])\n\n    # check valid flags\n    expected_valid_pixels = [2400, 600, 150, 54, 24, 6]\n    multi_level_valid_flags = anchor_generator.valid_flags(\n        featmap_sizes, (320, 320), device)\n    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):\n        assert single_level_valid_flag.sum() == expected_valid_pixels[i]\n\n    # check number of base anchors for each level\n    assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6]\n\n    # check anchor generation\n    anchors = anchor_generator.grid_anchors(featmap_sizes, device)\n    assert len(anchors) == 6\n\n    # test vgg ssd anchor setting\n    anchor_generator_cfg = dict(\n        type='SSDAnchorGenerator',\n        scale_major=False,\n        input_size=300,\n        basesize_ratio_range=(0.15, 0.9),\n        strides=[8, 16, 32, 64, 100, 300],\n        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])\n\n    featmap_sizes 
= [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]\n    anchor_generator = build_anchor_generator(anchor_generator_cfg)\n\n    # check base anchors\n    expected_base_anchors = [\n        torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],\n                      [-11.3704, -11.3704, 19.3704, 19.3704],\n                      [-10.8492, -3.4246, 18.8492, 11.4246],\n                      [-3.4246, -10.8492, 11.4246, 18.8492]]),\n        torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],\n                      [-25.3729, -25.3729, 41.3729, 41.3729],\n                      [-23.8198, -7.9099, 39.8198, 23.9099],\n                      [-7.9099, -23.8198, 23.9099, 39.8198],\n                      [-30.9711, -4.9904, 46.9711, 20.9904],\n                      [-4.9904, -30.9711, 20.9904, 46.9711]]),\n        torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],\n                      [-45.5366, -45.5366, 77.5366, 77.5366],\n                      [-54.0036, -19.0018, 86.0036, 51.0018],\n                      [-19.0018, -54.0036, 51.0018, 86.0036],\n                      [-69.7365, -12.5788, 101.7365, 44.5788],\n                      [-12.5788, -69.7365, 44.5788, 101.7365]]),\n        torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],\n                      [-56.9817, -56.9817, 120.9817, 120.9817],\n                      [-76.1873, -22.0937, 140.1873, 86.0937],\n                      [-22.0937, -76.1873, 86.0937, 140.1873],\n                      [-100.5019, -12.1673, 164.5019, 76.1673],\n                      [-12.1673, -100.5019, 76.1673, 164.5019]]),\n        torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],\n                      [-66.2185, -66.2185, 166.2185, 166.2185],\n                      [-96.3711, -23.1855, 196.3711, 123.1855],\n                      [-23.1855, -96.3711, 123.1855, 196.3711]]),\n        torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],\n                      [6.6342, 6.6342, 293.3658, 293.3658],\n                      [-34.5549, 57.7226, 334.5549, 242.2774],\n                      [57.7226, -34.5549, 242.2774, 334.5549]]),\n    ]\n    base_anchors = anchor_generator.base_anchors\n    for i, base_anchor in enumerate(base_anchors):\n        assert base_anchor.allclose(expected_base_anchors[i])\n\n    # check valid flags\n    expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]\n    multi_level_valid_flags = anchor_generator.valid_flags(\n        featmap_sizes, (300, 300), device)\n    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):\n        assert single_level_valid_flag.sum() == expected_valid_pixels[i]\n\n    # check number of base anchors for each level\n    assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]\n\n    # check anchor generation\n    anchors = anchor_generator.grid_anchors(featmap_sizes, device)\n    assert len(anchors) == 6\n\n\ndef test_anchor_generator_with_tuples():\n    from mmdet.models.task_modules import build_anchor_generator\n    if torch.cuda.is_available():\n        device = 'cuda'\n    else:\n        device = 'cpu'\n\n    anchor_generator_cfg = dict(\n        type='SSDAnchorGenerator',\n        scale_major=False,\n        input_size=300,\n        basesize_ratio_range=(0.15, 0.9),\n        strides=[8, 16, 32, 64, 100, 300],\n        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])\n\n    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]\n    anchor_generator = build_anchor_generator(anchor_generator_cfg)\n    anchors = anchor_generator.grid_anchors(featmap_sizes, 
device)\n\n    anchor_generator_cfg_tuples = dict(\n        type='SSDAnchorGenerator',\n        scale_major=False,\n        input_size=300,\n        basesize_ratio_range=(0.15, 0.9),\n        strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)],\n        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])\n\n    anchor_generator_tuples = build_anchor_generator(\n        anchor_generator_cfg_tuples)\n    anchors_tuples = anchor_generator_tuples.grid_anchors(\n        featmap_sizes, device)\n    for anchor, anchor_tuples in zip(anchors, anchors_tuples):\n        assert torch.equal(anchor, anchor_tuples)\n\n\ndef test_yolo_anchor_generator():\n    from mmdet.models.task_modules import build_anchor_generator\n    if torch.cuda.is_available():\n        device = 'cuda'\n    else:\n        device = 'cpu'\n\n    anchor_generator_cfg = dict(\n        type='YOLOAnchorGenerator',\n        strides=[32, 16, 8],\n        base_sizes=[\n            [(116, 90), (156, 198), (373, 326)],\n            [(30, 61), (62, 45), (59, 119)],\n            [(10, 13), (16, 30), (33, 23)],\n        ])\n\n    featmap_sizes = [(14, 18), (28, 36), (56, 72)]\n    anchor_generator = build_anchor_generator(anchor_generator_cfg)\n\n    # check base anchors\n    expected_base_anchors = [\n        torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],\n                      [-62.0000, -83.0000, 94.0000, 115.0000],\n                      [-170.5000, -147.0000, 202.5000, 179.0000]]),\n        torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],\n                      [-23.0000, -14.5000, 39.0000, 30.5000],\n                      [-21.5000, -51.5000, 37.5000, 67.5000]]),\n        torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],\n                      [-4.0000, -11.0000, 12.0000, 19.0000],\n                      [-12.5000, -7.5000, 20.5000, 15.5000]])\n    ]\n    base_anchors = anchor_generator.base_anchors\n    for i, base_anchor in enumerate(base_anchors):\n        assert base_anchor.allclose(expected_base_anchors[i])\n\n    # check number of base anchors for each level\n    assert anchor_generator.num_base_anchors == [3, 3, 3]\n\n    # check anchor generation\n    anchors = anchor_generator.grid_anchors(featmap_sizes, device)\n    assert len(anchors) == 3\n\n\ndef test_retina_anchor():\n    from mmdet.registry import MODELS\n    if torch.cuda.is_available():\n        device = 'cuda'\n    else:\n        device = 'cpu'\n\n    # head configs modified from\n    # configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py\n    bbox_head = dict(\n        type='RetinaSepBNHead',\n        num_classes=4,\n        num_ins=5,\n        in_channels=4,\n        stacked_convs=1,\n        feat_channels=4,\n        anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        bbox_coder=dict(\n            type='DeltaXYWHBBoxCoder',\n            target_means=[.0, .0, .0, .0],\n            target_stds=[1.0, 1.0, 1.0, 1.0]))\n\n    retina_head = MODELS.build(bbox_head)\n    assert retina_head.anchor_generator is not None\n\n    # use the featmap sizes in NASFPN setting to test retina head\n    featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]\n    # check base anchors\n    expected_base_anchors = [\n        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],\n                      [-28.5088, -14.2544, 28.5088, 14.2544],\n                      [-35.9188, -17.9594, 35.9188, 
17.9594],\n                      [-16.0000, -16.0000, 16.0000, 16.0000],\n                      [-20.1587, -20.1587, 20.1587, 20.1587],\n                      [-25.3984, -25.3984, 25.3984, 25.3984],\n                      [-11.3137, -22.6274, 11.3137, 22.6274],\n                      [-14.2544, -28.5088, 14.2544, 28.5088],\n                      [-17.9594, -35.9188, 17.9594, 35.9188]]),\n        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],\n                      [-57.0175, -28.5088, 57.0175, 28.5088],\n                      [-71.8376, -35.9188, 71.8376, 35.9188],\n                      [-32.0000, -32.0000, 32.0000, 32.0000],\n                      [-40.3175, -40.3175, 40.3175, 40.3175],\n                      [-50.7968, -50.7968, 50.7968, 50.7968],\n                      [-22.6274, -45.2548, 22.6274, 45.2548],\n                      [-28.5088, -57.0175, 28.5088, 57.0175],\n                      [-35.9188, -71.8376, 35.9188, 71.8376]]),\n        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],\n                      [-114.0350, -57.0175, 114.0350, 57.0175],\n                      [-143.6751, -71.8376, 143.6751, 71.8376],\n                      [-64.0000, -64.0000, 64.0000, 64.0000],\n                      [-80.6349, -80.6349, 80.6349, 80.6349],\n                      [-101.5937, -101.5937, 101.5937, 101.5937],\n                      [-45.2548, -90.5097, 45.2548, 90.5097],\n                      [-57.0175, -114.0350, 57.0175, 114.0350],\n                      [-71.8376, -143.6751, 71.8376, 143.6751]]),\n        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],\n                      [-228.0701, -114.0350, 228.0701, 114.0350],\n                      [-287.3503, -143.6751, 287.3503, 143.6751],\n                      [-128.0000, -128.0000, 128.0000, 128.0000],\n                      [-161.2699, -161.2699, 161.2699, 161.2699],\n                      [-203.1873, -203.1873, 203.1873, 203.1873],\n                      [-90.5097, -181.0193, 90.5097, 181.0193],\n                      [-114.0350, -228.0701, 114.0350, 228.0701],\n                      [-143.6751, -287.3503, 143.6751, 287.3503]]),\n        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],\n                      [-456.1401, -228.0701, 456.1401, 228.0701],\n                      [-574.7006, -287.3503, 574.7006, 287.3503],\n                      [-256.0000, -256.0000, 256.0000, 256.0000],\n                      [-322.5398, -322.5398, 322.5398, 322.5398],\n                      [-406.3747, -406.3747, 406.3747, 406.3747],\n                      [-181.0193, -362.0387, 181.0193, 362.0387],\n                      [-228.0701, -456.1401, 228.0701, 456.1401],\n                      [-287.3503, -574.7006, 287.3503, 574.7006]])\n    ]\n    base_anchors = retina_head.anchor_generator.base_anchors\n    for i, base_anchor in enumerate(base_anchors):\n        assert base_anchor.allclose(expected_base_anchors[i])\n\n    # check valid flags\n    expected_valid_pixels = [57600, 14400, 3600, 900, 225]\n    multi_level_valid_flags = retina_head.anchor_generator.valid_flags(\n        featmap_sizes, (640, 640), device)\n    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):\n        assert single_level_valid_flag.sum() == expected_valid_pixels[i]\n\n    # check number of base anchors for each level\n    assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]\n\n    # check anchor generation\n    anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)\n    assert len(anchors) == 
5\n\n\ndef test_guided_anchor():\n    from mmdet.registry import MODELS\n    if torch.cuda.is_available():\n        device = 'cuda'\n    else:\n        device = 'cpu'\n    # head configs modified from\n    # configs/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py\n    bbox_head = dict(\n        type='GARetinaHead',\n        num_classes=8,\n        in_channels=4,\n        stacked_convs=1,\n        feat_channels=4,\n        approx_anchor_generator=dict(\n            type='AnchorGenerator',\n            octave_base_scale=4,\n            scales_per_octave=3,\n            ratios=[0.5, 1.0, 2.0],\n            strides=[8, 16, 32, 64, 128]),\n        square_anchor_generator=dict(\n            type='AnchorGenerator',\n            ratios=[1.0],\n            scales=[4],\n            strides=[8, 16, 32, 64, 128]))\n\n    ga_retina_head = MODELS.build(bbox_head)\n    assert ga_retina_head.approx_anchor_generator is not None\n\n    # use the featmap sizes in NASFPN setting to test ga_retina_head\n    featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]\n    # check base anchors\n    expected_approxs = [\n        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],\n                      [-28.5088, -14.2544, 28.5088, 14.2544],\n                      [-35.9188, -17.9594, 35.9188, 17.9594],\n                      [-16.0000, -16.0000, 16.0000, 16.0000],\n                      [-20.1587, -20.1587, 20.1587, 20.1587],\n                      [-25.3984, -25.3984, 25.3984, 25.3984],\n                      [-11.3137, -22.6274, 11.3137, 22.6274],\n                      [-14.2544, -28.5088, 14.2544, 28.5088],\n                      [-17.9594, -35.9188, 17.9594, 35.9188]]),\n        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],\n                      [-57.0175, -28.5088, 57.0175, 28.5088],\n                      [-71.8376, -35.9188, 71.8376, 35.9188],\n                      [-32.0000, -32.0000, 32.0000, 32.0000],\n                      [-40.3175, -40.3175, 40.3175, 40.3175],\n                      [-50.7968, -50.7968, 50.7968, 50.7968],\n                      [-22.6274, -45.2548, 22.6274, 45.2548],\n                      [-28.5088, -57.0175, 28.5088, 57.0175],\n                      [-35.9188, -71.8376, 35.9188, 71.8376]]),\n        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],\n                      [-114.0350, -57.0175, 114.0350, 57.0175],\n                      [-143.6751, -71.8376, 143.6751, 71.8376],\n                      [-64.0000, -64.0000, 64.0000, 64.0000],\n                      [-80.6349, -80.6349, 80.6349, 80.6349],\n                      [-101.5937, -101.5937, 101.5937, 101.5937],\n                      [-45.2548, -90.5097, 45.2548, 90.5097],\n                      [-57.0175, -114.0350, 57.0175, 114.0350],\n                      [-71.8376, -143.6751, 71.8376, 143.6751]]),\n        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],\n                      [-228.0701, -114.0350, 228.0701, 114.0350],\n                      [-287.3503, -143.6751, 287.3503, 143.6751],\n                      [-128.0000, -128.0000, 128.0000, 128.0000],\n                      [-161.2699, -161.2699, 161.2699, 161.2699],\n                      [-203.1873, -203.1873, 203.1873, 203.1873],\n                      [-90.5097, -181.0193, 90.5097, 181.0193],\n                      [-114.0350, -228.0701, 114.0350, 228.0701],\n                      [-143.6751, -287.3503, 143.6751, 287.3503]]),\n        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],\n                      [-456.1401, 
-228.0701, 456.1401, 228.0701],\n                      [-574.7006, -287.3503, 574.7006, 287.3503],\n                      [-256.0000, -256.0000, 256.0000, 256.0000],\n                      [-322.5398, -322.5398, 322.5398, 322.5398],\n                      [-406.3747, -406.3747, 406.3747, 406.3747],\n                      [-181.0193, -362.0387, 181.0193, 362.0387],\n                      [-228.0701, -456.1401, 228.0701, 456.1401],\n                      [-287.3503, -574.7006, 287.3503, 574.7006]])\n    ]\n    approxs = ga_retina_head.approx_anchor_generator.base_anchors\n    for i, base_anchor in enumerate(approxs):\n        assert base_anchor.allclose(expected_approxs[i])\n\n    # check valid flags\n    expected_valid_pixels = [136800, 34200, 8550, 2223, 630]\n    multi_level_valid_flags = ga_retina_head.approx_anchor_generator \\\n        .valid_flags(featmap_sizes, (800, 1216), device)\n    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):\n        assert single_level_valid_flag.sum() == expected_valid_pixels[i]\n\n    # check number of base anchors for each level\n    assert ga_retina_head.approx_anchor_generator.num_base_anchors == [\n        9, 9, 9, 9, 9\n    ]\n\n    # check approx generation\n    squares = ga_retina_head.square_anchor_generator.grid_anchors(\n        featmap_sizes, device)\n    assert len(squares) == 5\n\n    expected_squares = [\n        torch.Tensor([[-16., -16., 16., 16.]]),\n        torch.Tensor([[-32., -32., 32., 32]]),\n        torch.Tensor([[-64., -64., 64., 64.]]),\n        torch.Tensor([[-128., -128., 128., 128.]]),\n        torch.Tensor([[-256., -256., 256., 256.]])\n    ]\n    squares = ga_retina_head.square_anchor_generator.base_anchors\n    for i, base_anchor in enumerate(squares):\n        assert base_anchor.allclose(expected_squares[i])\n\n    # square_anchor_generator does not check valid flags\n    # check number of base anchors for each level\n    assert (ga_retina_head.square_anchor_generator.num_base_anchors == [\n        1, 1, 1, 1, 1\n    ])\n\n    # check square generation\n    anchors = ga_retina_head.square_anchor_generator.grid_anchors(\n        featmap_sizes, device)\n    assert len(anchors) == 5\n"
  },
  {
    "path": "tests/test_models/test_task_modules/test_samplers/test_pesudo_sampler.py",
    "content": "# TODO: follow up\n"
  },
  {
    "path": "tests/test_models/test_tta/test_det_tta.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest import TestCase\n\nimport torch\nfrom mmengine import ConfigDict\n\nfrom mmdet.models import DetTTAModel\nfrom mmdet.registry import MODELS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.testing import get_detector_cfg\nfrom mmdet.utils import register_all_modules\n\n\nclass TestDetTTAModel(TestCase):\n\n    def setUp(self):\n        register_all_modules()\n\n    def test_det_tta_model(self):\n\n        detector_cfg = get_detector_cfg(\n            'retinanet/retinanet_r18_fpn_1x_coco.py')\n        cfg = ConfigDict(\n            type='DetTTAModel',\n            module=detector_cfg,\n            tta_cfg=dict(\n                nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))\n\n        model: DetTTAModel = MODELS.build(cfg)\n\n        imgs = []\n        data_samples = []\n        directions = ['horizontal', 'vertical']\n        for i in range(12):\n            flip_direction = directions[0] if i % 3 == 0 else directions[1]\n            imgs.append(torch.randn(1, 3, 100 + 10 * i, 100 + 10 * i))\n            data_samples.append([\n                DetDataSample(\n                    metainfo=dict(\n                        ori_shape=(100, 100),\n                        img_shape=(100 + 10 * i, 100 + 10 * i),\n                        scale_factor=((100 + 10 * i) / 100,\n                                      (100 + 10 * i) / 100),\n                        flip=(i % 2 == 0),\n                        flip_direction=flip_direction), )\n            ])\n\n        model.test_step(dict(inputs=imgs, data_samples=data_samples))\n"
  },
  {
    "path": "tests/test_models/test_utils/test_misc.py",
    "content": "import copy\n\nimport pytest\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.models.utils import (empty_instances, filter_gt_instances,\n                                rename_loss_dict, reweight_loss_dict,\n                                unpack_gt_instances)\nfrom mmdet.testing import demo_mm_inputs\n\n\ndef test_parse_gt_instance_info():\n    packed_inputs = demo_mm_inputs()['data_samples']\n    batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \\\n        = unpack_gt_instances(packed_inputs)\n    assert len(batch_gt_instances) == len(packed_inputs)\n    assert len(batch_gt_instances_ignore) == len(packed_inputs)\n    assert len(batch_img_metas) == len(packed_inputs)\n\n\ndef test_process_empty_roi():\n    batch_size = 2\n    batch_img_metas = [{'ori_shape': (10, 12)}] * batch_size\n    device = torch.device('cpu')\n\n    results_list = empty_instances(batch_img_metas, device, task_type='bbox')\n    assert len(results_list) == batch_size\n    for results in results_list:\n        assert isinstance(results, InstanceData)\n        assert len(results) == 0\n        assert torch.allclose(results.bboxes, torch.zeros(0, 4, device=device))\n\n    results_list = empty_instances(\n        batch_img_metas,\n        device,\n        task_type='mask',\n        instance_results=results_list,\n        mask_thr_binary=0.5)\n    assert len(results_list) == batch_size\n    for results in results_list:\n        assert isinstance(results, InstanceData)\n        assert len(results) == 0\n        assert results.masks.shape == (0, 10, 12)\n\n    # batch_img_metas and instance_results length must be the same\n    with pytest.raises(AssertionError):\n        empty_instances(\n            batch_img_metas,\n            device,\n            task_type='mask',\n            instance_results=[results_list[0]] * 3)\n\n\ndef test_filter_gt_instances():\n    packed_inputs = demo_mm_inputs()['data_samples']\n    score_thr = 0.7\n    with pytest.raises(AssertionError):\n        filter_gt_instances(packed_inputs, score_thr=score_thr)\n\n    # filter no instances by score\n    for inputs in packed_inputs:\n        inputs.gt_instances.scores = torch.ones_like(\n            inputs.gt_instances.labels).float()\n    filtered_packed_inputs = filter_gt_instances(\n        copy.deepcopy(packed_inputs), score_thr=score_thr)\n    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):\n        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)\n\n    # filter all instances\n    for inputs in packed_inputs:\n        inputs.gt_instances.scores = torch.zeros_like(\n            inputs.gt_instances.labels).float()\n    filtered_packed_inputs = filter_gt_instances(\n        copy.deepcopy(packed_inputs), score_thr=score_thr)\n    for filtered_inputs in filtered_packed_inputs:\n        assert len(filtered_inputs.gt_instances) == 0\n\n    packed_inputs = demo_mm_inputs()['data_samples']\n    # filter no instances by size\n    wh_thr = (0, 0)\n    filtered_packed_inputs = filter_gt_instances(\n        copy.deepcopy(packed_inputs), wh_thr=wh_thr)\n    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):\n        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)\n\n    # filter all instances by size\n    for inputs in packed_inputs:\n        img_shape = inputs.img_shape\n        wh_thr = (max(wh_thr[0], img_shape[0]), max(wh_thr[1], img_shape[1]))\n    filtered_packed_inputs = filter_gt_instances(\n        
copy.deepcopy(packed_inputs), wh_thr=wh_thr)\n    for filtered_inputs in filtered_packed_inputs:\n        assert len(filtered_inputs.gt_instances) == 0\n\n\ndef test_rename_loss_dict():\n    prefix = 'sup_'\n    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}\n    sup_losses = rename_loss_dict(prefix, losses)\n    for name in losses.keys():\n        assert sup_losses[prefix + name] == losses[name]\n\n\ndef test_reweight_loss_dict():\n    weight = 4\n    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}\n    weighted_losses = reweight_loss_dict(copy.deepcopy(losses), weight)\n    for name in losses.keys():\n        assert weighted_losses[name] == losses[name] * weight\n"
  },
  {
    "path": "tests/test_models/test_utils/test_model_misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.autograd import gradcheck\n\nfrom mmdet.models.utils import interpolate_as, sigmoid_geometric_mean\n\n\ndef test_interpolate_as():\n    source = torch.rand((1, 5, 4, 4))\n    target = torch.rand((1, 1, 16, 16))\n\n    # Test 4D source and target\n    result = interpolate_as(source, target)\n    assert result.shape == torch.Size((1, 5, 16, 16))\n\n    # Test 3D target\n    result = interpolate_as(source, target.squeeze(0))\n    assert result.shape == torch.Size((1, 5, 16, 16))\n\n    # Test 3D source\n    result = interpolate_as(source.squeeze(0), target)\n    assert result.shape == torch.Size((5, 16, 16))\n\n    # Test type(target) == np.ndarray\n    target = np.random.rand(16, 16)\n    result = interpolate_as(source.squeeze(0), target)\n    assert result.shape == torch.Size((5, 16, 16))\n\n\ndef test_sigmoid_geometric_mean():\n    x = torch.randn(20, 20, dtype=torch.double, requires_grad=True)\n    y = torch.randn(20, 20, dtype=torch.double, requires_grad=True)\n    inputs = (x, y)\n    test = gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4)\n    assert test\n"
  },
  {
    "path": "tests/test_structures/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_structures/test_bbox/__init__.py",
    "content": ""
  },
  {
    "path": "tests/test_structures/test_bbox/test_base_boxes.py",
    "content": "from unittest import TestCase\n\nimport numpy as np\nimport torch\nfrom mmengine.testing import assert_allclose\n\nfrom .utils import ToyBaseBoxes\n\n\nclass TestBaseBoxes(TestCase):\n\n    def test_init(self):\n        box_tensor = torch.rand((3, 4, 4))\n        boxes = ToyBaseBoxes(box_tensor)\n\n        boxes = ToyBaseBoxes(box_tensor, dtype=torch.float64)\n        self.assertEqual(boxes.tensor.dtype, torch.float64)\n\n        if torch.cuda.is_available():\n            boxes = ToyBaseBoxes(box_tensor, device='cuda')\n            self.assertTrue(boxes.tensor.is_cuda)\n\n        with self.assertRaises(AssertionError):\n            box_tensor = torch.rand((4, ))\n            boxes = ToyBaseBoxes(box_tensor)\n\n        with self.assertRaises(AssertionError):\n            box_tensor = torch.rand((3, 4, 3))\n            boxes = ToyBaseBoxes(box_tensor)\n\n    def test_getitem(self):\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n\n        # test single dimension index\n        # int\n        new_boxes = boxes[0]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (4, 4))\n        # list\n        new_boxes = boxes[[0, 2]]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))\n        # slice\n        new_boxes = boxes[0:2]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))\n        # torch.LongTensor\n        new_boxes = boxes[torch.LongTensor([0, 1])]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))\n        # torch.BoolTensor\n        new_boxes = boxes[torch.BoolTensor([True, False, True])]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))\n        with self.assertRaises(AssertionError):\n            index = torch.rand((2, 4, 4)) > 0\n            new_boxes = boxes[index]\n\n        # test multiple dimension index\n        # select single box\n        new_boxes = boxes[1, 2]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (1, 4))\n        # select the last dimension\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes[1, 2, 1]\n        # has Ellipsis\n        new_boxes = boxes[None, ...]\n        self.assertIsInstance(new_boxes, ToyBaseBoxes)\n        self.assertEqual(new_boxes.tensor.shape, (1, 3, 4, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes[..., None]\n\n    def test_setitem(self):\n        values = ToyBaseBoxes(torch.rand(3, 4, 4))\n        tensor = torch.rand(3, 4, 4)\n\n        # only support BaseBoxes type\n        with self.assertRaises(AssertionError):\n            boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n            boxes[0:2] = tensor[0:2]\n\n        # test single dimension index\n        # int\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[1] = values[1]\n        assert_allclose(boxes.tensor[1], values.tensor[1])\n        # list\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[[1, 2]] = values[[1, 2]]\n        assert_allclose(boxes.tensor[[1, 2]], values.tensor[[1, 2]])\n        # slice\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[0:2] = values[0:2]\n        assert_allclose(boxes.tensor[0:2], values.tensor[0:2])\n        # torch.BoolTensor\n        boxes = 
ToyBaseBoxes(torch.rand(3, 4, 4))\n        index = torch.rand(3, 4) > 0.5\n        boxes[index] = values[index]\n        assert_allclose(boxes.tensor[index], values.tensor[index])\n\n        # multiple dimension index\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[0:2, 0:2] = values[0:2, 0:2]\n        assert_allclose(boxes.tensor[0:2, 0:2], values.tensor[0:2, 0:2])\n        # select single box\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[1, 1] = values[1, 1]\n        assert_allclose(boxes.tensor[1, 1], values.tensor[1, 1])\n        # select the last dimension\n        with self.assertRaises(AssertionError):\n            boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n            boxes[1, 1, 1] = values[1, 1, 1]\n        # has Ellipsis\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        boxes[0:2, ...] = values[0:2, ...]\n        assert_allclose(boxes.tensor[0:2, ...], values.tensor[0:2, ...])\n\n    def test_tensor_like_functions(self):\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        # new_tensor\n        boxes.new_tensor([1, 2, 3])\n        # new_full\n        boxes.new_full((3, 4), 0)\n        # new_empty\n        boxes.new_empty((3, 4))\n        # new_ones\n        boxes.new_ones((3, 4))\n        # new_zeros\n        boxes.new_zeros((3, 4))\n        # size\n        self.assertEqual(boxes.size(0), 3)\n        self.assertEqual(tuple(boxes.size()), (3, 4, 4))\n        # dim\n        self.assertEqual(boxes.dim(), 3)\n        # device\n        self.assertIsInstance(boxes.device, torch.device)\n        # dtype\n        self.assertIsInstance(boxes.dtype, torch.dtype)\n        # numpy\n        np_boxes = boxes.numpy()\n        self.assertIsInstance(np_boxes, np.ndarray)\n        self.assertTrue((np_boxes == np_boxes).all())\n        # to\n        new_boxes = boxes.to(torch.uint8)\n        self.assertEqual(new_boxes.tensor.dtype, torch.uint8)\n        if torch.cuda.is_available():\n            new_boxes = boxes.to(device='cuda')\n            self.assertTrue(new_boxes.tensor.is_cuda)\n        # cpu\n        if torch.cuda.is_available():\n            new_boxes = boxes.to(device='cuda')\n            new_boxes = new_boxes.cpu()\n            self.assertFalse(new_boxes.tensor.is_cuda)\n        # cuda\n        if torch.cuda.is_available():\n            new_boxes = boxes.cuda()\n            self.assertTrue(new_boxes.tensor.is_cuda)\n        # clone\n        boxes.clone()\n        # detach\n        boxes.detach()\n        # view\n        new_boxes = boxes.view(12, 4)\n        self.assertEqual(tuple(new_boxes.size()), (12, 4))\n        new_boxes = boxes.view(-1, 4)\n        self.assertEqual(tuple(new_boxes.size()), (12, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.view(-1)\n        # reshape\n        new_boxes = boxes.reshape(12, 4)\n        self.assertEqual(tuple(new_boxes.size()), (12, 4))\n        new_boxes = boxes.reshape(-1, 4)\n        self.assertEqual(tuple(new_boxes.size()), (12, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.reshape(-1)\n        # expand\n        new_boxes = boxes[None, ...].expand(4, -1, -1, -1)\n        self.assertEqual(tuple(new_boxes.size()), (4, 3, 4, 4))\n        # repeat\n        new_boxes = boxes.repeat(2, 2, 1)\n        self.assertEqual(tuple(new_boxes.size()), (6, 8, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.repeat(2, 2, 2)\n        # transpose\n        new_boxes = boxes.transpose(0, 1)\n        
self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.transpose(1, 2)\n        # permute\n        new_boxes = boxes.permute(1, 0, 2)\n        self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.permute(2, 1, 0)\n        # split\n        boxes_list = boxes.split(1, dim=0)\n        for box in boxes_list:\n            self.assertIsInstance(box, ToyBaseBoxes)\n            self.assertEqual(tuple(box.size()), (1, 4, 4))\n        boxes_list = boxes.split([1, 2], dim=0)\n        with self.assertRaises(AssertionError):\n            boxes_list = boxes.split(1, dim=2)\n        # chunk\n        boxes_list = boxes.split(3, dim=1)\n        self.assertEqual(len(boxes_list), 2)\n        for box in boxes_list:\n            self.assertIsInstance(box, ToyBaseBoxes)\n        with self.assertRaises(AssertionError):\n            boxes_list = boxes.split(3, dim=2)\n        # unbind\n        boxes_list = boxes.unbind(dim=1)\n        self.assertEqual(len(boxes_list), 4)\n        for box in boxes_list:\n            self.assertIsInstance(box, ToyBaseBoxes)\n            self.assertEqual(tuple(box.size()), (3, 4))\n        with self.assertRaises(AssertionError):\n            boxes_list = boxes.unbind(dim=2)\n        # flatten\n        new_boxes = boxes.flatten()\n        self.assertEqual(tuple(new_boxes.size()), (12, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.flatten(end_dim=2)\n        # squeeze\n        boxes = ToyBaseBoxes(torch.rand(1, 3, 1, 4, 4))\n        new_boxes = boxes.squeeze()\n        self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))\n        new_boxes = boxes.squeeze(dim=2)\n        self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))\n        # unsqueeze\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        new_boxes = boxes.unsqueeze(0)\n        self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.unsqueeze(3)\n        # cat\n        with self.assertRaises(ValueError):\n            ToyBaseBoxes.cat([])\n        box_list = []\n        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))\n        box_list.append(ToyBaseBoxes(torch.rand(1, 4, 4)))\n        with self.assertRaises(AssertionError):\n            ToyBaseBoxes.cat(box_list, dim=2)\n        cat_boxes = ToyBaseBoxes.cat(box_list, dim=0)\n        self.assertIsInstance(cat_boxes, ToyBaseBoxes)\n        self.assertEqual((cat_boxes.size()), (4, 4, 4))\n        # stack\n        with self.assertRaises(ValueError):\n            ToyBaseBoxes.stack([])\n        box_list = []\n        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))\n        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))\n        with self.assertRaises(AssertionError):\n            ToyBaseBoxes.stack(box_list, dim=3)\n        stack_boxes = ToyBaseBoxes.stack(box_list, dim=1)\n        self.assertIsInstance(stack_boxes, ToyBaseBoxes)\n        self.assertEqual((stack_boxes.size()), (3, 2, 4, 4))\n\n    def test_misc(self):\n        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))\n        # __len__\n        self.assertEqual(len(boxes), 3)\n        # __repr__\n        repr(boxes)\n        # fake_boxes\n        new_boxes = boxes.fake_boxes((3, 4, 4), 1)\n        self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))\n        self.assertEqual(boxes.dtype, new_boxes.dtype)\n        
self.assertEqual(boxes.device, new_boxes.device)\n        self.assertTrue((new_boxes.tensor == 1).all())\n        with self.assertRaises(AssertionError):\n            new_boxes = boxes.fake_boxes((3, 4, 1))\n        new_boxes = boxes.fake_boxes((3, 4, 4), dtype=torch.uint8)\n        self.assertEqual(new_boxes.dtype, torch.uint8)\n        if torch.cuda.is_available():\n            new_boxes = boxes.fake_boxes((3, 4, 4), device='cuda')\n            self.assertTrue(new_boxes.tensor.is_cuda)\n"
  },
  {
    "path": "tests/test_structures/test_bbox/test_box_type.py",
    "content": "from unittest import TestCase\nfrom unittest.mock import MagicMock\n\nimport torch\n\nfrom mmdet.structures.bbox.box_type import (_box_type_to_name, box_converters,\n                                            box_types, convert_box_type,\n                                            get_box_type, register_box,\n                                            register_box_converter)\nfrom .utils import ToyBaseBoxes\n\n\nclass TestBoxType(TestCase):\n\n    def setUp(self):\n        self.box_types = box_types.copy()\n        self.box_converters = box_converters.copy()\n        self._box_type_to_name = _box_type_to_name.copy()\n\n    def tearDown(self):\n        # Clear registered items\n        box_types.clear()\n        box_converters.clear()\n        _box_type_to_name.clear()\n        # Restore original items\n        box_types.update(self.box_types)\n        box_converters.update(self.box_converters)\n        _box_type_to_name.update(self._box_type_to_name)\n\n    def test_register_box(self):\n        # test usage of decorator\n        @register_box('A')\n        class A(ToyBaseBoxes):\n            pass\n\n        # test usage of normal function\n        class B(ToyBaseBoxes):\n            pass\n\n        register_box('B', B)\n\n        # register class without inheriting from BaseBoxes\n        with self.assertRaises(AssertionError):\n\n            @register_box('C')\n            class C:\n                pass\n\n        # test register registered class\n        with self.assertRaises(KeyError):\n\n            @register_box('A')\n            class AA(ToyBaseBoxes):\n                pass\n\n        with self.assertRaises(KeyError):\n            register_box('BB', B)\n\n        @register_box('A', force=True)\n        class AAA(ToyBaseBoxes):\n            pass\n\n        self.assertIs(box_types['a'], AAA)\n        self.assertEqual(_box_type_to_name[AAA], 'a')\n        register_box('BB', B, force=True)\n        self.assertIs(box_types['bb'], B)\n        self.assertEqual(_box_type_to_name[B], 'bb')\n        self.assertEqual(len(box_types), len(_box_type_to_name))\n\n    def test_register_box_converter(self):\n\n        @register_box('A')\n        class A(ToyBaseBoxes):\n            pass\n\n        @register_box('B')\n        class B(ToyBaseBoxes):\n            pass\n\n        @register_box('C')\n        class C(ToyBaseBoxes):\n            pass\n\n        # test usage of decorator\n        @register_box_converter('A', 'B')\n        def converter_A(bboxes):\n            return bboxes\n\n        # test usage of normal function\n        def converter_B(bboxes):\n            return bboxes\n\n        register_box_converter('B', 'A', converter_B)\n\n        # register uncallable object\n        with self.assertRaises(AssertionError):\n            register_box_converter('A', 'C', 'uncallable str')\n\n        # test register unregistered bbox mode\n        with self.assertRaises(AssertionError):\n\n            @register_box_converter('A', 'D')\n            def converter_C(bboxes):\n                return bboxes\n\n        # test register registered converter\n        with self.assertRaises(KeyError):\n\n            @register_box_converter('A', 'B')\n            def converter_D(bboxes):\n                return bboxes\n\n        @register_box_converter('A', 'B', force=True)\n        def converter_E(bboxes):\n            return bboxes\n\n        self.assertIs(box_converters['a2b'], converter_E)\n\n    def test_get_box_type(self):\n\n        @register_box('A')\n        class A(ToyBaseBoxes):\n            pass\n\n        mode_name, mode_cls = get_box_type('A')\n        self.assertEqual(mode_name, 'a')\n        self.assertIs(mode_cls, A)\n        mode_name, mode_cls = get_box_type(A)\n        self.assertEqual(mode_name, 'a')\n        self.assertIs(mode_cls, A)\n\n        # get unregistered mode\n        class B(ToyBaseBoxes):\n            pass\n\n        with self.assertRaises(AssertionError):\n            mode_name, mode_cls = get_box_type('B')\n        with self.assertRaises(AssertionError):\n            mode_name, mode_cls = get_box_type(B)\n\n    def test_convert_box_type(self):\n\n        @register_box('A')\n        class A(ToyBaseBoxes):\n            pass\n\n        @register_box('B')\n        class B(ToyBaseBoxes):\n            pass\n\n        @register_box('C')\n        class C(ToyBaseBoxes):\n            pass\n\n        converter = MagicMock()\n        converter.return_value = torch.rand(3, 4, 4)\n        register_box_converter('A', 'B', converter)\n\n        bboxes_a = A(torch.rand(3, 4, 4))\n        th_bboxes_a = bboxes_a.tensor\n        np_bboxes_a = th_bboxes_a.numpy()\n\n        # test convert to mode\n        convert_box_type(bboxes_a, dst_type='B')\n        self.assertTrue(converter.called)\n        converted_bboxes = convert_box_type(bboxes_a, dst_type='A')\n        self.assertIs(converted_bboxes, bboxes_a)\n        # test convert to unregistered mode\n        with self.assertRaises(AssertionError):\n            convert_box_type(bboxes_a, dst_type='C')\n\n        # test convert tensor and ndarray\n        # without specific src_type\n        with self.assertRaises(AssertionError):\n            convert_box_type(th_bboxes_a, dst_type='B')\n        with self.assertRaises(AssertionError):\n            convert_box_type(np_bboxes_a, dst_type='B')\n        # test np.ndarray\n        convert_box_type(np_bboxes_a, src_type='A', dst_type='B')\n        converted_bboxes = convert_box_type(\n            np_bboxes_a, src_type='A', dst_type='A')\n        self.assertIs(converted_bboxes, np_bboxes_a)\n        # test tensor\n        convert_box_type(th_bboxes_a, src_type='A', dst_type='B')\n        converted_bboxes = convert_box_type(\n            th_bboxes_a, src_type='A', dst_type='A')\n        self.assertIs(converted_bboxes, th_bboxes_a)\n        # test other type\n        with self.assertRaises(TypeError):\n            convert_box_type([[1, 2, 3, 4]], src_type='A', dst_type='B')\n"
  },
  {
    "path": "tests/test_structures/test_bbox/test_horizontal_boxes.py",
    "content": "import random\nfrom math import sqrt\nfrom unittest import TestCase\n\nimport cv2\nimport numpy as np\nimport torch\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.structures.bbox import HorizontalBoxes\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\n\nclass TestHorizontalBoxes(TestCase):\n\n    def test_init(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)\n\n        boxes = HorizontalBoxes(th_boxes)\n        assert_allclose(boxes.tensor, th_boxes)\n        boxes = HorizontalBoxes(th_boxes, in_mode='xyxy')\n        assert_allclose(boxes.tensor, th_boxes)\n        boxes = HorizontalBoxes(th_boxes_cxcywh, in_mode='cxcywh')\n        assert_allclose(boxes.tensor, th_boxes)\n        with self.assertRaises(ValueError):\n            boxes = HorizontalBoxes(th_boxes, in_mode='invalid')\n\n    def test_cxcywh(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n\n        assert_allclose(\n            HorizontalBoxes.xyxy_to_cxcywh(th_boxes), th_boxes_cxcywh)\n        assert_allclose(th_boxes,\n                        HorizontalBoxes.cxcywh_to_xyxy(th_boxes_cxcywh))\n        assert_allclose(boxes.cxcywh, th_boxes_cxcywh)\n\n    def test_property(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n\n        # Centers\n        centers = torch.Tensor([15, 15]).reshape(1, 1, 2)\n        assert_allclose(boxes.centers, centers)\n        # Areas\n        areas = torch.Tensor([100]).reshape(1, 1)\n        assert_allclose(boxes.areas, areas)\n        # widths\n        widths = torch.Tensor([10]).reshape(1, 1)\n        assert_allclose(boxes.widths, widths)\n        # heights\n        heights = torch.Tensor([10]).reshape(1, 1)\n        assert_allclose(boxes.heights, heights)\n\n    def test_flip(self):\n        img_shape = [50, 85]\n        # horizontal flip\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        flipped_boxes_th = torch.Tensor([65, 10, 75, 20]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.flip_(img_shape, direction='horizontal')\n        assert_allclose(boxes.tensor, flipped_boxes_th)\n        # vertical flip\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        flipped_boxes_th = torch.Tensor([10, 30, 20, 40]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.flip_(img_shape, direction='vertical')\n        assert_allclose(boxes.tensor, flipped_boxes_th)\n        # diagonal flip\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        flipped_boxes_th = torch.Tensor([65, 30, 75, 40]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.flip_(img_shape, direction='diagonal')\n        assert_allclose(boxes.tensor, flipped_boxes_th)\n\n    def test_translate(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.translate_([23, 46])\n        translated_boxes_th = torch.Tensor([33, 56, 43, 66]).reshape(1, 1, 4)\n        assert_allclose(boxes.tensor, translated_boxes_th)\n\n    def test_clip(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        img_shape = [13, 14]\n        boxes = 
HorizontalBoxes(th_boxes)\n        boxes.clip_(img_shape)\n        cliped_boxes_th = torch.Tensor([10, 10, 14, 13]).reshape(1, 1, 4)\n        assert_allclose(boxes.tensor, cliped_boxes_th)\n\n    def test_rotate(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        center = (15, 15)\n        angle = -45\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.rotate_(center, angle)\n        rotated_boxes_th = torch.Tensor([\n            15 - 5 * sqrt(2), 15 - 5 * sqrt(2), 15 + 5 * sqrt(2),\n            15 + 5 * sqrt(2)\n        ]).reshape(1, 1, 4)\n        assert_allclose(boxes.tensor, rotated_boxes_th)\n\n    def test_project(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        boxes1 = HorizontalBoxes(th_boxes)\n        boxes2 = boxes1.clone()\n\n        matrix = np.zeros((3, 3), dtype=np.float32)\n        center = [random.random() * 80, random.random() * 80]\n        angle = random.random() * 180\n        matrix[:2, :3] = cv2.getRotationMatrix2D(center, angle, 1)\n        x_translate = random.random() * 40\n        y_translate = random.random() * 40\n        matrix[0, 2] = matrix[0, 2] + x_translate\n        matrix[1, 2] = matrix[1, 2] + y_translate\n        scale_factor = random.random() * 2\n        matrix[2, 2] = 1 / scale_factor\n        boxes1.project_(matrix)\n\n        boxes2.rotate_(center, -angle)\n        boxes2.translate_([x_translate, y_translate])\n        boxes2.rescale_([scale_factor, scale_factor])\n        assert_allclose(boxes1.tensor, boxes2.tensor)\n        # test empty boxes\n        empty_boxes = HorizontalBoxes(torch.zeros((0, 4)))\n        empty_boxes.project_(matrix)\n\n    def test_rescale(self):\n        scale_factor = [0.4, 0.8]\n        # rescale\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.rescale_(scale_factor)\n        rescaled_boxes_th = torch.Tensor([4, 8, 8, 16]).reshape(1, 1, 4)\n        assert_allclose(boxes.tensor, rescaled_boxes_th)\n\n    def test_resize(self):\n        scale_factor = [0.4, 0.8]\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        boxes.resize_(scale_factor)\n        resized_boxes_th = torch.Tensor([13, 11, 17, 19]).reshape(1, 1, 4)\n        assert_allclose(boxes.tensor, resized_boxes_th)\n\n    def test_is_inside(self):\n        th_boxes = torch.Tensor([[10, 10, 20, 20], [-5, -5, 15, 15],\n                                 [45, 45, 55, 55]]).reshape(1, 3, 4)\n        img_shape = [30, 30]\n        boxes = HorizontalBoxes(th_boxes)\n\n        index = boxes.is_inside(img_shape)\n        index_th = torch.BoolTensor([True, True, False]).reshape(1, 3)\n        assert_allclose(index, index_th)\n\n    def test_find_inside_points(self):\n        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 4)\n        boxes = HorizontalBoxes(th_boxes)\n        points = torch.Tensor([[0, 0], [0, 15], [15, 0], [15, 15]])\n        index = boxes.find_inside_points(points)\n        index_th = torch.BoolTensor([False, False, False, True]).reshape(4, 1)\n        assert_allclose(index, index_th)\n        # is_aligned\n        boxes = boxes.expand(4, 4)\n        index = boxes.find_inside_points(points, is_aligned=True)\n        index_th = torch.BoolTensor([False, False, False, True])\n        assert_allclose(index, index_th)\n\n    def test_from_instance_masks(self):\n        bitmap_masks = BitmapMasks.random()\n        boxes = 
HorizontalBoxes.from_instance_masks(bitmap_masks)\n        self.assertIsInstance(boxes, HorizontalBoxes)\n        self.assertEqual(len(boxes), len(bitmap_masks))\n        polygon_masks = PolygonMasks.random()\n        boxes = HorizontalBoxes.from_instance_masks(polygon_masks)\n        self.assertIsInstance(boxes, HorizontalBoxes)\n        self.assertEqual(len(boxes), len(polygon_masks))\n        # zero length masks\n        bitmap_masks = BitmapMasks.random(num_masks=0)\n        boxes = HorizontalBoxes.from_instance_masks(bitmap_masks)\n        self.assertIsInstance(boxes, HorizontalBoxes)\n        self.assertEqual(len(boxes), 0)\n        polygon_masks = PolygonMasks.random(num_masks=0)\n        boxes = HorizontalBoxes.from_instance_masks(polygon_masks)\n        self.assertIsInstance(boxes, HorizontalBoxes)\n        self.assertEqual(len(boxes), 0)\n"
  },
  {
    "path": "tests/test_structures/test_bbox/utils.py",
    "content": "from mmdet.structures.bbox import BaseBoxes\n\n\nclass ToyBaseBoxes(BaseBoxes):\n\n    box_dim = 4\n\n    @property\n    def centers(self):\n        pass\n\n    @property\n    def areas(self):\n        pass\n\n    @property\n    def widths(self):\n        pass\n\n    @property\n    def heights(self):\n        pass\n\n    def flip_(self, img_shape, direction='horizontal'):\n        pass\n\n    def translate_(self, distances):\n        pass\n\n    def clip_(self, img_shape):\n        pass\n\n    def rotate_(self, center, angle):\n        pass\n\n    def project_(self, homography_matrix):\n        pass\n\n    def rescale_(self, scale_factor):\n        pass\n\n    def resize_(self, scale_factor):\n        pass\n\n    def is_inside(self, img_shape):\n        pass\n\n    def find_inside_points(self, points, is_aligned=False):\n        pass\n\n    def overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n        pass\n\n    def from_instance_masks(masks):\n        pass\n"
  },
  {
    "path": "tests/test_structures/test_det_data_sample.py",
    "content": "from unittest import TestCase\n\nimport numpy as np\nimport pytest\nimport torch\nfrom mmengine.structures import InstanceData, PixelData\n\nfrom mmdet.structures import DetDataSample\n\n\ndef _equal(a, b):\n    if isinstance(a, (torch.Tensor, np.ndarray)):\n        return (a == b).all()\n    else:\n        return a == b\n\n\nclass TestDetDataSample(TestCase):\n\n    def test_init(self):\n        meta_info = dict(\n            img_size=[256, 256],\n            scale_factor=np.array([1.5, 1.5]),\n            img_shape=torch.rand(4))\n\n        det_data_sample = DetDataSample(metainfo=meta_info)\n        assert 'img_size' in det_data_sample\n        assert det_data_sample.img_size == [256, 256]\n        assert det_data_sample.get('img_size') == [256, 256]\n\n    def test_setter(self):\n        det_data_sample = DetDataSample()\n        # test gt_instances\n        gt_instances_data = dict(\n            bboxes=torch.rand(4, 4),\n            labels=torch.rand(4),\n            masks=np.random.rand(4, 2, 2))\n        gt_instances = InstanceData(**gt_instances_data)\n        det_data_sample.gt_instances = gt_instances\n        assert 'gt_instances' in det_data_sample\n        assert _equal(det_data_sample.gt_instances.bboxes,\n                      gt_instances_data['bboxes'])\n        assert _equal(det_data_sample.gt_instances.labels,\n                      gt_instances_data['labels'])\n        assert _equal(det_data_sample.gt_instances.masks,\n                      gt_instances_data['masks'])\n\n        # test pred_instances\n        pred_instances_data = dict(\n            bboxes=torch.rand(2, 4),\n            labels=torch.rand(2),\n            masks=np.random.rand(2, 2, 2))\n        pred_instances = InstanceData(**pred_instances_data)\n        det_data_sample.pred_instances = pred_instances\n        assert 'pred_instances' in det_data_sample\n        assert _equal(det_data_sample.pred_instances.bboxes,\n                      pred_instances_data['bboxes'])\n        assert _equal(det_data_sample.pred_instances.labels,\n                      pred_instances_data['labels'])\n        assert _equal(det_data_sample.pred_instances.masks,\n                      pred_instances_data['masks'])\n\n        # test proposals\n        proposals_data = dict(bboxes=torch.rand(4, 4), labels=torch.rand(4))\n        proposals = InstanceData(**proposals_data)\n        det_data_sample.proposals = proposals\n        assert 'proposals' in det_data_sample\n        assert _equal(det_data_sample.proposals.bboxes,\n                      proposals_data['bboxes'])\n        assert _equal(det_data_sample.proposals.labels,\n                      proposals_data['labels'])\n\n        # test ignored_instances\n        ignored_instances_data = dict(\n            bboxes=torch.rand(4, 4), labels=torch.rand(4))\n        ignored_instances = InstanceData(**ignored_instances_data)\n        det_data_sample.ignored_instances = ignored_instances\n        assert 'ignored_instances' in det_data_sample\n        assert _equal(det_data_sample.ignored_instances.bboxes,\n                      ignored_instances_data['bboxes'])\n        assert _equal(det_data_sample.ignored_instances.labels,\n                      ignored_instances_data['labels'])\n\n        # test gt_panoptic_seg\n        gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))\n        gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)\n        det_data_sample.gt_panoptic_seg = gt_panoptic_seg\n        assert 'gt_panoptic_seg' in det_data_sample\n        assert 
_equal(det_data_sample.gt_panoptic_seg.panoptic_seg,\n                      gt_panoptic_seg_data['panoptic_seg'])\n\n        # test pred_panoptic_seg\n        pred_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))\n        pred_panoptic_seg = PixelData(**pred_panoptic_seg_data)\n        det_data_sample.pred_panoptic_seg = pred_panoptic_seg\n        assert 'pred_panoptic_seg' in det_data_sample\n        assert _equal(det_data_sample.pred_panoptic_seg.panoptic_seg,\n                      pred_panoptic_seg_data['panoptic_seg'])\n\n        # test gt_sem_seg\n        gt_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))\n        gt_segm_seg = PixelData(**gt_segm_seg_data)\n        det_data_sample.gt_segm_seg = gt_segm_seg\n        assert 'gt_segm_seg' in det_data_sample\n        assert _equal(det_data_sample.gt_segm_seg.segm_seg,\n                      gt_segm_seg_data['segm_seg'])\n\n        # test pred_segm_seg\n        pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))\n        pred_segm_seg = PixelData(**pred_segm_seg_data)\n        det_data_sample.pred_segm_seg = pred_segm_seg\n        assert 'pred_segm_seg' in det_data_sample\n        assert _equal(det_data_sample.pred_segm_seg.segm_seg,\n                      pred_segm_seg_data['segm_seg'])\n\n        # test type error\n        with pytest.raises(AssertionError):\n            det_data_sample.pred_instances = torch.rand(2, 4)\n\n        with pytest.raises(AssertionError):\n            det_data_sample.pred_panoptic_seg = torch.rand(2, 4)\n\n        with pytest.raises(AssertionError):\n            det_data_sample.pred_sem_seg = torch.rand(2, 4)\n\n    def test_deleter(self):\n        gt_instances_data = dict(\n            bboxes=torch.rand(4, 4),\n            labels=torch.rand(4),\n            masks=np.random.rand(4, 2, 2))\n\n        det_data_sample = DetDataSample()\n        gt_instances = InstanceData(data=gt_instances_data)\n        det_data_sample.gt_instances = gt_instances\n        assert 'gt_instances' in det_data_sample\n        del det_data_sample.gt_instances\n        assert 'gt_instances' not in det_data_sample\n\n        pred_panoptic_seg_data = torch.rand(5, 4)\n        pred_panoptic_seg = PixelData(data=pred_panoptic_seg_data)\n        det_data_sample.pred_panoptic_seg = pred_panoptic_seg\n        assert 'pred_panoptic_seg' in det_data_sample\n        del det_data_sample.pred_panoptic_seg\n        assert 'pred_panoptic_seg' not in det_data_sample\n\n        pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))\n        pred_segm_seg = PixelData(**pred_segm_seg_data)\n        det_data_sample.pred_segm_seg = pred_segm_seg\n        assert 'pred_segm_seg' in det_data_sample\n        del det_data_sample.pred_segm_seg\n        assert 'pred_segm_seg' not in det_data_sample\n"
  },
  {
    "path": "tests/test_structures/test_mask/test_mask_structures.py",
    "content": "from unittest import TestCase\n\nimport numpy as np\nfrom mmengine.testing import assert_allclose\n\nfrom mmdet.structures.mask import BitmapMasks, PolygonMasks\n\n\nclass TestMaskStructures(TestCase):\n\n    def test_bitmap_translate_same_size(self):\n        mask_array = np.zeros((5, 10, 10), dtype=np.uint8)\n        mask_array[:, 0:5, 0:5] = 1\n        mask_target = np.zeros((5, 10, 10), dtype=np.uint8)\n        mask_target[:, 0:5, 5:10] = 1\n\n        mask = BitmapMasks(mask_array, 10, 10)\n        mask = mask.translate((10, 10), 5)\n        assert mask.masks.shape == (5, 10, 10)\n        assert_allclose(mask_target, mask.masks)\n\n    def test_bitmap_translate_diff_size(self):\n        # test out shape larger\n        mask_array = np.zeros((5, 10, 10), dtype=np.uint8)\n        mask_array[:, 0:5, 0:5] = 1\n\n        mask_target = np.zeros((5, 20, 20), dtype=np.uint8)\n        mask_target[:, 0:5, 5:10] = 1\n        mask = BitmapMasks(mask_array, 10, 10)\n        mask = mask.translate((20, 20), 5)\n        assert mask.masks.shape == (5, 20, 20)\n        assert_allclose(mask_target, mask.masks)\n\n        # test out shape smaller\n        mask_array = np.zeros((5, 10, 10), dtype=np.uint8)\n        mask_array[:, 0:5, 0:5] = 1\n\n        mask_target = np.zeros((5, 20, 8), dtype=np.uint8)\n        mask_target[:, 0:5, 5:] = 1\n        mask = BitmapMasks(mask_array, 10, 10)\n        mask = mask.translate((20, 8), 5)\n        assert mask.masks.shape == (5, 20, 8)\n        assert_allclose(mask_target, mask.masks)\n\n    def test_bitmap_cat(self):\n        # test invalid inputs\n        with self.assertRaises(AssertionError):\n            BitmapMasks.cat(BitmapMasks.random(4))\n        with self.assertRaises(ValueError):\n            BitmapMasks.cat([])\n        with self.assertRaises(AssertionError):\n            BitmapMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])\n\n        masks = [BitmapMasks.random(num_masks=3) for _ in range(5)]\n        cat_mask = BitmapMasks.cat(masks)\n        assert len(cat_mask) == 3 * 5\n        for i, m in enumerate(masks):\n            assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])\n\n    def test_polygon_cat(self):\n        # test invalid inputs\n        with self.assertRaises(AssertionError):\n            PolygonMasks.cat(PolygonMasks.random(4))\n        with self.assertRaises(ValueError):\n            PolygonMasks.cat([])\n        with self.assertRaises(AssertionError):\n            PolygonMasks.cat([BitmapMasks.random(2), PolygonMasks.random(3)])\n\n        masks = [PolygonMasks.random(num_masks=3) for _ in range(5)]\n        cat_mask = PolygonMasks.cat(masks)\n        assert len(cat_mask) == 3 * 5\n        for i, m in enumerate(masks):\n            assert_allclose(m.masks, cat_mask.masks[i * 3:(i + 1) * 3])\n"
  },
  {
    "path": "tests/test_utils/test_benchmark.py",
    "content": "import copy\nimport os\nimport tempfile\nimport unittest\n\nimport torch\nimport torch.nn as nn\nfrom mmengine import Config, MMLogger\nfrom mmengine.dataset import Compose\nfrom torch.utils.data import Dataset\n\nfrom mmdet.registry import DATASETS, MODELS\nfrom mmdet.utils import register_all_modules\nfrom mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,\n                                   InferenceBenchmark)\n\n\n@MODELS.register_module()\nclass ToyDetector(nn.Module):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n\n    def forward(self, data_batch, return_loss=False):\n        pass\n\n\n@DATASETS.register_module()\nclass ToyDataset(Dataset):\n    METAINFO = dict()  # type: ignore\n    data = torch.randn(12, 2)\n    label = torch.ones(12)\n\n    def __init__(self):\n        self.pipeline = Compose([lambda x: x])\n\n    def __len__(self):\n        return self.data.size(0)\n\n    def get_data_info(self, index):\n        return dict(inputs=self.data[index], data_sample=self.label[index])\n\n    def __getitem__(self, index):\n        return dict(inputs=self.data[index], data_sample=self.label[index])\n\n\n@DATASETS.register_module()\nclass ToyFullInitDataset(Dataset):\n    METAINFO = dict()  # type: ignore\n    data = torch.randn(12, 2)\n    label = torch.ones(12)\n\n    def __init__(self):\n        self.pipeline = Compose([lambda x: x])\n\n    def __len__(self):\n        return self.data.size(0)\n\n    def get_data_info(self, index):\n        return dict(inputs=self.data[index], data_sample=self.label[index])\n\n    def full_init(self):\n        pass\n\n    def __getitem__(self, index):\n        return dict(inputs=self.data[index], data_sample=self.label[index])\n\n\nclass TestInferenceBenchmark(unittest.TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n        self.cfg = Config(\n            dict(\n                model=dict(type='ToyDetector'),\n                test_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=False),\n                    batch_size=3,\n                    num_workers=1),\n                env_cfg=dict(dist_cfg=dict(backend='nccl'))))\n        self.max_iter = 10\n        self.log_interval = 5\n\n    @unittest.skipIf(not torch.cuda.is_available(),\n                     'test requires GPU and torch+cuda')\n    def test_init_and_run(self):\n        checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')\n        torch.save(ToyDetector().state_dict(), checkpoint_path)\n\n        cfg = copy.deepcopy(self.cfg)\n        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,\n                                                 False, self.max_iter,\n                                                 self.log_interval)\n        results = inference_benchmark.run()\n\n        self.assertTrue(isinstance(results, dict))\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 1)\n        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)\n        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)\n\n        results = inference_benchmark.run(1)\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 1)\n        self.assertTrue(inference_benchmark.data_loader.num_workers == 
0)\n        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)\n\n        # test repeat\n        results = inference_benchmark.run(3)\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 3)\n\n        # test cudnn_benchmark\n        cfg = copy.deepcopy(self.cfg)\n        cfg.env_cfg.cudnn_benchmark = True\n        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,\n                                                 False, self.max_iter,\n                                                 self.log_interval)\n        inference_benchmark.run(1)\n\n        # test mp_cfg\n        cfg = copy.deepcopy(self.cfg)\n        cfg.env_cfg.cudnn_benchmark = True\n        cfg.env_cfg.mp_cfg = {\n            'mp_start_method': 'fork',\n            'opencv_num_threads': 1\n        }\n        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,\n                                                 False, self.max_iter,\n                                                 self.log_interval)\n        inference_benchmark.run(1)\n\n        # test fp16\n        cfg = copy.deepcopy(self.cfg)\n        cfg.fp16 = True\n        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,\n                                                 False, self.max_iter,\n                                                 self.log_interval)\n        inference_benchmark.run(1)\n\n        # test logger\n        logger = MMLogger.get_instance(\n            'mmdet', log_file='temp.log', log_level='INFO')\n        inference_benchmark = InferenceBenchmark(\n            cfg,\n            checkpoint_path,\n            False,\n            False,\n            self.max_iter,\n            self.log_interval,\n            logger=logger)\n        inference_benchmark.run(1)\n        self.assertTrue(os.path.exists('temp.log'))\n\n        os.remove(checkpoint_path)\n        os.remove('temp.log')\n\n\nclass TestDataLoaderBenchmark(unittest.TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n        self.cfg = Config(\n            dict(\n                model=dict(type='ToyDetector'),\n                train_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=True),\n                    batch_size=2,\n                    num_workers=1),\n                val_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=False),\n                    batch_size=1,\n                    num_workers=2),\n                test_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=False),\n                    batch_size=3,\n                    num_workers=1),\n                env_cfg=dict(dist_cfg=dict(backend='nccl'))))\n        self.max_iter = 5\n        self.log_interval = 1\n        self.num_warmup = 1\n\n    def test_init_and_run(self):\n        cfg = copy.deepcopy(self.cfg)\n        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',\n                                                   self.max_iter,\n                                                   self.log_interval,\n                                                   self.num_warmup)\n        results = dataloader_benchmark.run(1)\n        self.assertTrue('avg_fps' in results)\n        
self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 1)\n        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)\n        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 2)\n\n        # test repeat\n        results = dataloader_benchmark.run(3)\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 3)\n\n        # test dataset_type input parameters error\n        with self.assertRaises(AssertionError):\n            DataLoaderBenchmark(cfg, False, 'training', self.max_iter,\n                                self.log_interval, self.num_warmup)\n\n        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'val',\n                                                   self.max_iter,\n                                                   self.log_interval,\n                                                   self.num_warmup)\n        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 2)\n        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 1)\n\n        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'test',\n                                                   self.max_iter,\n                                                   self.log_interval,\n                                                   self.num_warmup)\n        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)\n        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 3)\n\n        # test mp_cfg\n        cfg = copy.deepcopy(self.cfg)\n        cfg.env_cfg.mp_cfg = {\n            'mp_start_method': 'fork',\n            'opencv_num_threads': 1\n        }\n        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',\n                                                   self.max_iter,\n                                                   self.log_interval,\n                                                   self.num_warmup)\n        dataloader_benchmark.run(1)\n\n\nclass TestDatasetBenchmark(unittest.TestCase):\n\n    def setUp(self) -> None:\n        register_all_modules()\n\n        self.cfg = Config(\n            dict(\n                model=dict(type='ToyDetector'),\n                train_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=True),\n                    batch_size=2,\n                    num_workers=1),\n                val_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=False),\n                    batch_size=1,\n                    num_workers=2),\n                test_dataloader=dict(\n                    dataset=dict(type='ToyDataset'),\n                    sampler=dict(type='DefaultSampler', shuffle=False),\n                    batch_size=3,\n                    num_workers=1)))\n        self.max_iter = 5\n        self.log_interval = 1\n        self.num_warmup = 1\n\n    def test_init_and_run(self):\n        cfg = copy.deepcopy(self.cfg)\n        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,\n                                             self.log_interval,\n                                             self.num_warmup)\n        results = dataset_benchmark.run(1)\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        
self.assertEqual(len(results['fps_list']), 1)\n\n        # test repeat\n        results = dataset_benchmark.run(3)\n        self.assertTrue('avg_fps' in results)\n        self.assertTrue('fps_list' in results)\n        self.assertEqual(len(results['fps_list']), 3)\n\n        # test test dataset\n        dataset_benchmark = DatasetBenchmark(cfg, 'test', self.max_iter,\n                                             self.log_interval,\n                                             self.num_warmup)\n        dataset_benchmark.run(1)\n\n        # test val dataset\n        dataset_benchmark = DatasetBenchmark(cfg, 'val', self.max_iter,\n                                             self.log_interval,\n                                             self.num_warmup)\n        dataset_benchmark.run(1)\n\n        # test dataset_type input parameters error\n        with self.assertRaises(AssertionError):\n            DatasetBenchmark(cfg, 'training', self.max_iter, self.log_interval,\n                             self.num_warmup)\n\n        # test full_init\n        cfg = copy.deepcopy(self.cfg)\n        cfg.test_dataloader.dataset = dict(type='ToyFullInitDataset')\n        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,\n                                             self.log_interval,\n                                             self.num_warmup)\n        dataset_benchmark.run(1)\n"
  },
  {
    "path": "tests/test_utils/test_memory.py",
    "content": "import numpy as np\nimport pytest\nimport torch\n\nfrom mmdet.utils import AvoidOOM\nfrom mmdet.utils.memory import cast_tensor_type\n\n\ndef test_avoidoom():\n    tensor = torch.from_numpy(np.random.random((20, 20)))\n    if torch.cuda.is_available():\n        tensor = tensor.cuda()\n        # get default result\n        default_result = torch.mm(tensor, tensor.transpose(1, 0))\n\n        # when not occurred OOM error\n        AvoidCudaOOM = AvoidOOM()\n        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,\n                                                          tensor.transpose(\n                                                              1, 0))\n        assert default_result.device == result.device and \\\n               default_result.dtype == result.dtype and \\\n               torch.equal(default_result, result)\n\n        # calculate with fp16 and convert back to source type\n        AvoidCudaOOM = AvoidOOM(test=True)\n        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,\n                                                          tensor.transpose(\n                                                              1, 0))\n        assert default_result.device == result.device and \\\n               default_result.dtype == result.dtype and \\\n               torch.allclose(default_result, result, 1e-3)\n\n        # calculate on cpu and convert back to source device\n        AvoidCudaOOM = AvoidOOM(test=True)\n        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,\n                                                          tensor.transpose(\n                                                              1, 0))\n        assert result.dtype == default_result.dtype and \\\n               result.device == default_result.device and \\\n               torch.allclose(default_result, result)\n\n        # do not calculate on cpu and the outputs will be same as input\n        AvoidCudaOOM = AvoidOOM(test=True, to_cpu=False)\n        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,\n                                                          tensor.transpose(\n                                                              1, 0))\n        assert result.dtype == default_result.dtype and \\\n               result.device == default_result.device\n\n    else:\n        default_result = torch.mm(tensor, tensor.transpose(1, 0))\n        AvoidCudaOOM = AvoidOOM()\n        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,\n                                                          tensor.transpose(\n                                                              1, 0))\n        assert default_result.device == result.device and \\\n               default_result.dtype == result.dtype and \\\n               torch.equal(default_result, result)\n\n\ndef test_cast_tensor_type():\n    inputs = torch.rand(10)\n    if torch.cuda.is_available():\n        inputs = inputs.cuda()\n    with pytest.raises(AssertionError):\n        cast_tensor_type(inputs, src_type=None, dst_type=None)\n    # input is a float\n    out = cast_tensor_type(10., dst_type=torch.half)\n    assert out == 10. 
and isinstance(out, float)\n    # convert Tensor to fp16 and re-convert to fp32\n    fp16_out = cast_tensor_type(inputs, dst_type=torch.half)\n    assert fp16_out.dtype == torch.half\n    fp32_out = cast_tensor_type(fp16_out, dst_type=torch.float32)\n    assert fp32_out.dtype == torch.float32\n\n    # input is a list\n    list_input = [inputs, inputs]\n    list_outs = cast_tensor_type(list_input, dst_type=torch.half)\n    assert len(list_outs) == len(list_input) and \\\n           isinstance(list_outs, list)\n    for out in list_outs:\n        assert out.dtype == torch.half\n    # input is a dict\n    dict_input = {'test1': inputs, 'test2': inputs}\n    dict_outs = cast_tensor_type(dict_input, dst_type=torch.half)\n    assert len(dict_outs) == len(dict_input) and \\\n           isinstance(dict_outs, dict)\n\n    # convert the input tensor to CPU and re-convert to GPU\n    if torch.cuda.is_available():\n        cpu_device = torch.empty(0).device\n        gpu_device = inputs.device\n        cpu_out = cast_tensor_type(inputs, dst_type=cpu_device)\n        assert cpu_out.device == cpu_device\n\n        gpu_out = cast_tensor_type(inputs, dst_type=gpu_device)\n        assert gpu_out.device == gpu_device\n"
  },
  {
    "path": "tests/test_utils/test_replace_cfg_vals.py",
    "content": "import os.path as osp\nimport tempfile\nfrom copy import deepcopy\n\nimport pytest\nfrom mmengine.config import Config\n\nfrom mmdet.utils import replace_cfg_vals\n\n\ndef test_replace_cfg_vals():\n    temp_file = tempfile.NamedTemporaryFile()\n    cfg_path = f'{temp_file.name}.py'\n    with open(cfg_path, 'w') as f:\n        f.write('configs')\n\n    ori_cfg_dict = dict()\n    ori_cfg_dict['cfg_name'] = osp.basename(temp_file.name)\n    ori_cfg_dict['work_dir'] = 'work_dirs/${cfg_name}/${percent}/${fold}'\n    ori_cfg_dict['percent'] = 5\n    ori_cfg_dict['fold'] = 1\n    ori_cfg_dict['model_wrapper'] = dict(\n        type='SoftTeacher', detector='${model}')\n    ori_cfg_dict['model'] = dict(\n        type='FasterRCNN',\n        backbone=dict(type='ResNet'),\n        neck=dict(type='FPN'),\n        rpn_head=dict(type='RPNHead'),\n        roi_head=dict(type='StandardRoIHead'),\n        train_cfg=dict(\n            rpn=dict(\n                assigner=dict(type='MaxIoUAssigner'),\n                sampler=dict(type='RandomSampler'),\n            ),\n            rpn_proposal=dict(nms=dict(type='nms', iou_threshold=0.7)),\n            rcnn=dict(\n                assigner=dict(type='MaxIoUAssigner'),\n                sampler=dict(type='RandomSampler'),\n            ),\n        ),\n        test_cfg=dict(\n            rpn=dict(nms=dict(type='nms', iou_threshold=0.7)),\n            rcnn=dict(nms=dict(type='nms', iou_threshold=0.5)),\n        ),\n    )\n    ori_cfg_dict['iou_threshold'] = dict(\n        rpn_proposal_nms='${model.train_cfg.rpn_proposal.nms.iou_threshold}',\n        test_rpn_nms='${model.test_cfg.rpn.nms.iou_threshold}',\n        test_rcnn_nms='${model.test_cfg.rcnn.nms.iou_threshold}',\n    )\n\n    ori_cfg_dict['str'] = 'Hello, world!'\n    ori_cfg_dict['dict'] = {'Hello': 'world!'}\n    ori_cfg_dict['list'] = [\n        'Hello, world!',\n    ]\n    ori_cfg_dict['tuple'] = ('Hello, world!', )\n    ori_cfg_dict['test_str'] = 'xxx${str}xxx'\n\n    ori_cfg = Config(ori_cfg_dict, filename=cfg_path)\n    updated_cfg = replace_cfg_vals(deepcopy(ori_cfg))\n\n    assert updated_cfg.work_dir \\\n        == f'work_dirs/{osp.basename(temp_file.name)}/5/1'\n    assert updated_cfg.model.detector == ori_cfg.model\n    assert updated_cfg.iou_threshold.rpn_proposal_nms \\\n        == ori_cfg.model.train_cfg.rpn_proposal.nms.iou_threshold\n    assert updated_cfg.test_str == 'xxxHello, world!xxx'\n    ori_cfg_dict['test_dict'] = 'xxx${dict}xxx'\n    ori_cfg_dict['test_list'] = 'xxx${list}xxx'\n    ori_cfg_dict['test_tuple'] = 'xxx${tuple}xxx'\n    with pytest.raises(AssertionError):\n        cfg = deepcopy(ori_cfg)\n        cfg['test_dict'] = 'xxx${dict}xxx'\n        updated_cfg = replace_cfg_vals(cfg)\n    with pytest.raises(AssertionError):\n        cfg = deepcopy(ori_cfg)\n        cfg['test_list'] = 'xxx${list}xxx'\n        updated_cfg = replace_cfg_vals(cfg)\n    with pytest.raises(AssertionError):\n        cfg = deepcopy(ori_cfg)\n        cfg['test_tuple'] = 'xxx${tuple}xxx'\n        updated_cfg = replace_cfg_vals(cfg)\n"
  },
  {
    "path": "tests/test_utils/test_setup_env.py",
    "content": "import datetime\nimport sys\nfrom unittest import TestCase\n\nfrom mmengine import DefaultScope\n\nfrom mmdet.utils import register_all_modules\n\n\nclass TestSetupEnv(TestCase):\n\n    def test_register_all_modules(self):\n        from mmdet.registry import DATASETS\n\n        # not init default scope\n        sys.modules.pop('mmdet.datasets', None)\n        sys.modules.pop('mmdet.datasets.coco', None)\n        DATASETS._module_dict.pop('CocoDataset', None)\n        self.assertFalse('CocoDataset' in DATASETS.module_dict)\n        register_all_modules(init_default_scope=False)\n        self.assertTrue('CocoDataset' in DATASETS.module_dict)\n\n        # init default scope\n        sys.modules.pop('mmdet.datasets')\n        sys.modules.pop('mmdet.datasets.coco')\n        DATASETS._module_dict.pop('CocoDataset', None)\n        self.assertFalse('CocoDataset' in DATASETS.module_dict)\n        register_all_modules(init_default_scope=True)\n        self.assertTrue('CocoDataset' in DATASETS.module_dict)\n        self.assertEqual(DefaultScope.get_current_instance().scope_name,\n                         'mmdet')\n\n        # init default scope when another scope is init\n        name = f'test-{datetime.datetime.now()}'\n        DefaultScope.get_instance(name, scope_name='test')\n        with self.assertWarnsRegex(\n                Warning, 'The current default scope \"test\" is not \"mmdet\"'):\n            register_all_modules(init_default_scope=True)\n"
  },
  {
    "path": "tests/test_visualization/test_local_visualizer.py",
    "content": "import os\nfrom unittest import TestCase\n\nimport cv2\nimport numpy as np\nimport torch\nfrom mmengine.structures import InstanceData, PixelData\n\nfrom mmdet.evaluation import INSTANCE_OFFSET\nfrom mmdet.structures import DetDataSample\nfrom mmdet.visualization import DetLocalVisualizer\n\n\ndef _rand_bboxes(num_boxes, h, w):\n    cx, cy, bw, bh = torch.rand(num_boxes, 4).T\n\n    tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)\n    tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)\n    br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)\n    br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)\n\n    bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T\n    return bboxes\n\n\ndef _create_panoptic_data(num_boxes, h, w):\n    sem_seg = np.zeros((h, w), dtype=np.int64) + 2\n    bboxes = _rand_bboxes(num_boxes, h, w).int()\n    labels = torch.randint(2, (num_boxes, ))\n    for i in range(num_boxes):\n        x, y, w, h = bboxes[i]\n        sem_seg[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + labels[i]\n\n    return sem_seg[None]\n\n\nclass TestDetLocalVisualizer(TestCase):\n\n    def test_add_datasample(self):\n        h = 12\n        w = 10\n        num_class = 3\n        num_bboxes = 5\n        out_file = 'out_file.jpg'\n\n        image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')\n\n        # test gt_instances\n        gt_instances = InstanceData()\n        gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)\n        gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n        det_data_sample = DetDataSample()\n        det_data_sample.gt_instances = gt_instances\n\n        det_local_visualizer = DetLocalVisualizer()\n        det_local_visualizer.add_datasample(\n            'image', image, det_data_sample, draw_pred=False)\n\n        # test out_file\n        det_local_visualizer.add_datasample(\n            'image',\n            image,\n            det_data_sample,\n            draw_pred=False,\n            out_file=out_file)\n        assert os.path.exists(out_file)\n        drawn_img = cv2.imread(out_file)\n        assert drawn_img.shape == (h, w, 3)\n        os.remove(out_file)\n\n        # test gt_instances and pred_instances\n        pred_instances = InstanceData()\n        pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)\n        pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n        pred_instances.scores = torch.rand((num_bboxes, ))\n        det_data_sample.pred_instances = pred_instances\n\n        det_local_visualizer.add_datasample(\n            'image', image, det_data_sample, out_file=out_file)\n        self._assert_image_and_shape(out_file, (h, w * 2, 3))\n\n        det_local_visualizer.add_datasample(\n            'image', image, det_data_sample, draw_gt=False, out_file=out_file)\n        self._assert_image_and_shape(out_file, (h, w, 3))\n\n        det_local_visualizer.add_datasample(\n            'image',\n            image,\n            det_data_sample,\n            draw_pred=False,\n            out_file=out_file)\n        self._assert_image_and_shape(out_file, (h, w, 3))\n\n        # test gt_panoptic_seg and pred_panoptic_seg\n        det_local_visualizer.dataset_meta = dict(classes=('1', '2'))\n        gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)\n        panoptic_seg = PixelData(sem_seg=gt_sem_seg)\n\n        det_data_sample = DetDataSample()\n        det_data_sample.gt_panoptic_seg = panoptic_seg\n\n        pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)\n        panoptic_seg = 
PixelData(sem_seg=pred_sem_seg)\n        det_data_sample.pred_panoptic_seg = panoptic_seg\n\n        det_local_visualizer.add_datasample(\n            'image', image, det_data_sample, out_file=out_file)\n        self._assert_image_and_shape(out_file, (h, w * 2, 3))\n\n        # class information must be provided\n        det_local_visualizer.dataset_meta = {}\n        with self.assertRaises(AssertionError):\n            det_local_visualizer.add_datasample(\n                'image', image, det_data_sample, out_file=out_file)\n\n    def _assert_image_and_shape(self, out_file, out_shape):\n        assert os.path.exists(out_file)\n        drawn_img = cv2.imread(out_file)\n        assert drawn_img.shape == out_shape\n        os.remove(out_file)\n"
  },
  {
    "path": "tests/test_visualization/test_palette.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\nfrom mmdet.datasets import CocoDataset\nfrom mmdet.visualization import get_palette, jitter_color, palette_val\n\n\ndef test_palette():\n    assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)\n\n    # test list\n    palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]\n    palette_ = get_palette(palette, 3)\n    for color, color_ in zip(palette, palette_):\n        assert color == color_\n\n    # test tuple\n    palette = get_palette((1, 2, 3), 3)\n    assert len(palette) == 3\n    for color in palette:\n        assert color == (1, 2, 3)\n\n    # test color str\n    palette = get_palette('red', 3)\n    assert len(palette) == 3\n    for color in palette:\n        assert color == (255, 0, 0)\n\n    # test dataset str\n    palette = get_palette('coco', len(CocoDataset.METAINFO['classes']))\n    assert len(palette) == len(CocoDataset.METAINFO['classes'])\n    assert palette[0] == (220, 20, 60)\n\n    # TODO: Awaiting refactoring\n    # palette = get_palette('coco', len(CocoPanopticDataset.METAINFO['CLASSES'])) # noqa\n    # assert len(palette) == len(CocoPanopticDataset.METAINFO['CLASSES'])\n    # assert palette[-1] == (250, 141, 255)\n    # palette = get_palette('voc', len(VOCDataset.METAINFO['CLASSES']))\n    # assert len(palette) == len(VOCDataset.METAINFO['CLASSES'])\n    # assert palette[0] == (106, 0, 228)\n    # palette = get_palette('citys', len(CityscapesDataset.METAINFO['CLASSES'])) # noqa\n    # assert len(palette) == len(CityscapesDataset.METAINFO['CLASSES'])\n    # assert palette[0] == (220, 20, 60)\n\n    # test random\n    palette1 = get_palette('random', 3)\n    palette2 = get_palette(None, 3)\n    for color1, color2 in zip(palette1, palette2):\n        assert isinstance(color1, tuple)\n        assert isinstance(color2, tuple)\n        assert color1 == color2\n\n\ndef test_jitter_color():\n    color = tuple(np.random.randint(0, 255, 3, np.uint8))\n    jittered_color = jitter_color(color)\n    for c in jittered_color:\n        assert 0 <= c <= 255\n"
  },
  {
    "path": "tools/analysis_tools/analyze_logs.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport json\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\ndef cal_train_time(log_dicts, args):\n    for i, log_dict in enumerate(log_dicts):\n        print(f'{\"-\" * 5}Analyze train time of {args.json_logs[i]}{\"-\" * 5}')\n        all_times = []\n        for epoch in log_dict.keys():\n            if args.include_outliers:\n                all_times.append(log_dict[epoch]['time'])\n            else:\n                all_times.append(log_dict[epoch]['time'][1:])\n        if not all_times:\n            raise KeyError(\n                'Please reduce the log interval in the config so that'\n                'interval is less than iterations of one epoch.')\n        epoch_ave_time = np.array(list(map(lambda x: np.mean(x), all_times)))\n        slowest_epoch = epoch_ave_time.argmax()\n        fastest_epoch = epoch_ave_time.argmin()\n        std_over_epoch = epoch_ave_time.std()\n        print(f'slowest epoch {slowest_epoch + 1}, '\n              f'average time is {epoch_ave_time[slowest_epoch]:.4f} s/iter')\n        print(f'fastest epoch {fastest_epoch + 1}, '\n              f'average time is {epoch_ave_time[fastest_epoch]:.4f} s/iter')\n        print(f'time std over epochs is {std_over_epoch:.4f}')\n        print(f'average iter time: {np.mean(epoch_ave_time):.4f} s/iter\\n')\n\n\ndef plot_curve(log_dicts, args):\n    if args.backend is not None:\n        plt.switch_backend(args.backend)\n    sns.set_style(args.style)\n    # if legend is None, use {filename}_{key} as legend\n    legend = args.legend\n    if legend is None:\n        legend = []\n        for json_log in args.json_logs:\n            for metric in args.keys:\n                legend.append(f'{json_log}_{metric}')\n    assert len(legend) == (len(args.json_logs) * len(args.keys))\n    metrics = args.keys\n\n    # TODO: support dynamic eval interval(e.g. RTMDet) when plotting mAP.\n    num_metrics = len(metrics)\n    for i, log_dict in enumerate(log_dicts):\n        epochs = list(log_dict.keys())\n        for j, metric in enumerate(metrics):\n            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')\n            if metric not in log_dict[epochs[int(args.eval_interval) - 1]]:\n                if 'mAP' in metric:\n                    raise KeyError(\n                        f'{args.json_logs[i]} does not contain metric '\n                        f'{metric}. Please check if \"--no-validate\" is '\n                        'specified when you trained the model. Or check '\n                        f'if the eval_interval {args.eval_interval} in args '\n                        'is equal to the eval_interval during training.')\n                raise KeyError(\n                    f'{args.json_logs[i]} does not contain metric {metric}. 
'\n                    'Please reduce the log interval in the config so that '\n                    'interval is less than iterations of one epoch.')\n\n            if 'mAP' in metric:\n                xs = []\n                ys = []\n                for epoch in epochs:\n                    ys += log_dict[epoch][metric]\n                    if log_dict[epoch][metric]:\n                        xs += [epoch]\n                plt.xlabel('epoch')\n                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')\n            else:\n                xs = []\n                ys = []\n                for epoch in epochs:\n                    iters = log_dict[epoch]['step']\n                    xs.append(np.array(iters))\n                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))\n                xs = np.concatenate(xs)\n                ys = np.concatenate(ys)\n                plt.xlabel('iter')\n                plt.plot(\n                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)\n            plt.legend()\n        if args.title is not None:\n            plt.title(args.title)\n    if args.out is None:\n        plt.show()\n    else:\n        print(f'save curve to: {args.out}')\n        plt.savefig(args.out)\n        plt.cla()\n\n\ndef add_plot_parser(subparsers):\n    parser_plt = subparsers.add_parser(\n        'plot_curve', help='parser for plotting curves')\n    parser_plt.add_argument(\n        'json_logs',\n        type=str,\n        nargs='+',\n        help='path of train log in json format')\n    parser_plt.add_argument(\n        '--keys',\n        type=str,\n        nargs='+',\n        default=['bbox_mAP'],\n        help='the metric that you want to plot')\n    parser_plt.add_argument(\n        '--start-epoch',\n        type=str,\n        default='1',\n        help='the epoch that you want to start')\n    parser_plt.add_argument(\n        '--eval-interval',\n        type=str,\n        default='1',\n        help='the eval interval when training')\n    parser_plt.add_argument('--title', type=str, help='title of figure')\n    parser_plt.add_argument(\n        '--legend',\n        type=str,\n        nargs='+',\n        default=None,\n        help='legend of each plot')\n    parser_plt.add_argument(\n        '--backend', type=str, default=None, help='backend of plt')\n    parser_plt.add_argument(\n        '--style', type=str, default='dark', help='style of plt')\n    parser_plt.add_argument('--out', type=str, default=None)\n\n\ndef add_time_parser(subparsers):\n    parser_time = subparsers.add_parser(\n        'cal_train_time',\n        help='parser for computing the average time per training iteration')\n    parser_time.add_argument(\n        'json_logs',\n        type=str,\n        nargs='+',\n        help='path of train log in json format')\n    parser_time.add_argument(\n        '--include-outliers',\n        action='store_true',\n        help='include the first value of every epoch when computing '\n        'the average time')\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Analyze Json Log')\n    # currently only support plot curve and calculate average train time\n    subparsers = parser.add_subparsers(dest='task', help='task parser')\n    add_plot_parser(subparsers)\n    add_time_parser(subparsers)\n    args = parser.parse_args()\n    return args\n\n\ndef load_json_logs(json_logs):\n    # load and convert json_logs to log_dict, key is epoch, value is a sub dict\n    # keys of sub dict is different metrics, 
e.g. memory, bbox_mAP\n    # value of sub dict is a list of corresponding values of all iterations\n    log_dicts = [dict() for _ in json_logs]\n    for json_log, log_dict in zip(json_logs, log_dicts):\n        with open(json_log, 'r') as log_file:\n            epoch = 1\n            for i, line in enumerate(log_file):\n                log = json.loads(line.strip())\n                val_flag = False\n                # skip lines that only contain one key\n                if not len(log) > 1:\n                    continue\n\n                if epoch not in log_dict:\n                    log_dict[epoch] = defaultdict(list)\n\n                for k, v in log.items():\n                    if '/' in k:\n                        log_dict[epoch][k.split('/')[-1]].append(v)\n                        val_flag = True\n                    elif val_flag:\n                        continue\n                    else:\n                        log_dict[epoch][k].append(v)\n\n                if 'epoch' in log.keys():\n                    epoch = log['epoch']\n\n    return log_dicts\n\n\ndef main():\n    args = parse_args()\n\n    json_logs = args.json_logs\n    for json_log in json_logs:\n        assert json_log.endswith('.json')\n\n    log_dicts = load_json_logs(json_logs)\n\n    eval(args.task)(log_dicts, args)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
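A minimal, hypothetical sketch of the one-JSON-dict-per-line training log that `load_json_logs` in `tools/analysis_tools/analyze_logs.py` expects, followed by a `cal_train_time` run against it. The metric keys (`time`, `loss`, `coco/bbox_mAP`) and all values are made up and depend on the training/evaluation config; the command assumes the repository root as working directory.

```python
import json
import subprocess
import tempfile

# Fabricated log lines: iteration records carry 'epoch'/'step'/'time'/losses,
# validation records carry slash-prefixed metric names such as 'coco/bbox_mAP'.
records = [
    {'epoch': 1, 'iter': 50, 'step': 50, 'lr': 0.02, 'time': 0.35, 'loss': 1.21},
    {'epoch': 1, 'iter': 100, 'step': 100, 'lr': 0.02, 'time': 0.33, 'loss': 1.02},
    {'coco/bbox_mAP': 0.21, 'step': 100},
]
log_path = tempfile.mktemp(suffix='.json')
with open(log_path, 'w') as f:
    f.writelines(json.dumps(r) + '\n' for r in records)

# Average per-iteration time of the fabricated log (run from the repo root).
subprocess.run(
    ['python', 'tools/analysis_tools/analyze_logs.py', 'cal_train_time', log_path],
    check=True)
```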
  {
    "path": "tools/analysis_tools/analyze_results.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\nfrom multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\nfrom mmengine.config import Config, DictAction\nfrom mmengine.fileio import load\nfrom mmengine.registry import init_default_scope\nfrom mmengine.runner import Runner\nfrom mmengine.structures import InstanceData, PixelData\nfrom mmengine.utils import ProgressBar, check_file_exist, mkdir_or_exist\n\nfrom mmdet.datasets import get_loading_pipeline\nfrom mmdet.evaluation import eval_map\nfrom mmdet.registry import DATASETS, RUNNERS\nfrom mmdet.structures import DetDataSample\nfrom mmdet.utils import replace_cfg_vals, update_data_root\nfrom mmdet.visualization import DetLocalVisualizer\n\n\ndef bbox_map_eval(det_result, annotation, nproc=4):\n    \"\"\"Evaluate mAP of single image det result.\n\n    Args:\n        det_result (list[list]): [[cls1_det, cls2_det, ...], ...].\n            The outer list indicates images, and the inner list indicates\n            per-class detected bboxes.\n        annotation (dict): Ground truth annotations where keys of\n             annotations are:\n\n            - bboxes: numpy array of shape (n, 4)\n            - labels: numpy array of shape (n, )\n            - bboxes_ignore (optional): numpy array of shape (k, 4)\n            - labels_ignore (optional): numpy array of shape (k, )\n\n        nproc (int): Processes used for computing mAP.\n            Default: 4.\n\n    Returns:\n        float: mAP\n    \"\"\"\n\n    # use only bbox det result\n    if isinstance(det_result, tuple):\n        bbox_det_result = [det_result[0]]\n    else:\n        bbox_det_result = [det_result]\n    # mAP\n    iou_thrs = np.linspace(\n        .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n\n    processes = []\n    workers = Pool(processes=nproc)\n    for thr in iou_thrs:\n        p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), {\n            'iou_thr': thr,\n            'logger': 'silent',\n            'nproc': 1\n        })\n        processes.append(p)\n\n    workers.close()\n    workers.join()\n\n    mean_aps = []\n    for p in processes:\n        mean_aps.append(p.get()[0])\n\n    return sum(mean_aps) / len(mean_aps)\n\n\nclass ResultVisualizer:\n    \"\"\"Display and save evaluation results.\n\n    Args:\n        show (bool): Whether to show the image. Default: True.\n        wait_time (float): Value of waitKey param. 
Default: 0.\n        score_thr (float): Minimum score of bboxes to be shown.\n           Default: 0.\n        runner (:obj:`Runner`): The runner of the visualization process.\n    \"\"\"\n\n    def __init__(self, show=False, wait_time=0, score_thr=0, runner=None):\n        self.show = show\n        self.wait_time = wait_time\n        self.score_thr = score_thr\n        self.visualizer = DetLocalVisualizer()\n        self.runner = runner\n        self.evaluator = runner.test_evaluator\n\n    def _save_image_gts_results(self,\n                                dataset,\n                                results,\n                                performances,\n                                out_dir=None,\n                                task='det'):\n        \"\"\"Display or save image with groung truths and predictions from a\n        model.\n\n        Args:\n            dataset (Dataset): A PyTorch dataset.\n            results (list): Object detection or panoptic segmentation\n                results from test results pkl file.\n            performances (dict): A dict contains samples's indices\n                in dataset and model's performance on them.\n            out_dir (str, optional): The filename to write the image.\n                Defaults: None.\n            task (str): The task to be performed. Defaults: 'det'\n        \"\"\"\n        mkdir_or_exist(out_dir)\n\n        for performance_info in performances:\n            index, performance = performance_info\n            data_info = dataset[index]\n            data_info['gt_instances'] = data_info['instances']\n\n            # calc save file path\n            filename = data_info['img_path']\n            fname, name = osp.splitext(osp.basename(filename))\n            save_filename = fname + '_' + str(round(performance, 3)) + name\n            out_file = osp.join(out_dir, save_filename)\n\n            if task == 'det':\n                gt_instances = InstanceData()\n                gt_instances.bboxes = results[index]['gt_instances']['bboxes']\n                gt_instances.labels = results[index]['gt_instances']['labels']\n\n                pred_instances = InstanceData()\n                pred_instances.bboxes = results[index]['pred_instances'][\n                    'bboxes']\n                pred_instances.labels = results[index]['pred_instances'][\n                    'labels']\n                pred_instances.scores = results[index]['pred_instances'][\n                    'scores']\n\n                data_samples = DetDataSample()\n                data_samples.pred_instances = pred_instances\n                data_samples.gt_instances = gt_instances\n\n            elif task == 'seg':\n                gt_panoptic_seg = PixelData()\n                gt_panoptic_seg.sem_seg = results[index]['gt_seg_map']\n\n                pred_panoptic_seg = PixelData()\n                pred_panoptic_seg.sem_seg = results[index][\n                    'pred_panoptic_seg']['sem_seg']\n\n                data_samples = DetDataSample()\n                data_samples.pred_panoptic_seg = pred_panoptic_seg\n                data_samples.gt_panoptic_seg = gt_panoptic_seg\n\n            img = mmcv.imread(filename, channel_order='rgb')\n            self.visualizer.add_datasample(\n                'image',\n                img,\n                data_samples,\n                show=self.show,\n                draw_gt=False,\n                pred_score_thr=self.score_thr,\n                out_file=out_file)\n\n    def evaluate_and_show(self,\n                          
dataset,\n                          results,\n                          topk=20,\n                          show_dir='work_dir'):\n        \"\"\"Evaluate and show results.\n\n        Args:\n            dataset (Dataset): A PyTorch dataset.\n            results (list): Object detection or panoptic segmentation\n                results from test results pkl file.\n            topk (int): Number of the highest topk and\n                lowest topk after evaluation index sorting. Default: 20.\n            show_dir (str, optional): The filename to write the image.\n                Default: 'work_dir'\n        \"\"\"\n\n        self.visualizer.dataset_meta = dataset.metainfo\n\n        assert topk > 0\n        if (topk * 2) > len(dataset):\n            topk = len(dataset) // 2\n\n        good_dir = osp.abspath(osp.join(show_dir, 'good'))\n        bad_dir = osp.abspath(osp.join(show_dir, 'bad'))\n\n        if 'pred_panoptic_seg' in results[0].keys():\n            good_samples, bad_samples = self.panoptic_evaluate(\n                dataset, results, topk=topk)\n            self._save_image_gts_results(\n                dataset, results, good_samples, good_dir, task='seg')\n            self._save_image_gts_results(\n                dataset, results, bad_samples, bad_dir, task='seg')\n        elif 'pred_instances' in results[0].keys():\n            good_samples, bad_samples = self.detection_evaluate(\n                dataset, results, topk=topk)\n            self._save_image_gts_results(\n                dataset, results, good_samples, good_dir, task='det')\n            self._save_image_gts_results(\n                dataset, results, bad_samples, bad_dir, task='det')\n        else:\n            raise 'expect \\'pred_panoptic_seg\\' or \\'pred_instances\\' \\\n                in dict result'\n\n    def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):\n        \"\"\"Evaluation for object detection.\n\n        Args:\n            dataset (Dataset): A PyTorch dataset.\n            results (list): Object detection results from test\n                results pkl file.\n            topk (int): Number of the highest topk and\n                lowest topk after evaluation index sorting. 
Default: 20.\n            eval_fn (callable, optional): Eval function, Default: None.\n\n        Returns:\n            tuple: A tuple contains good samples and bad samples.\n                good_mAPs (dict[int, float]): A dict contains good\n                    samples's indices in dataset and model's\n                    performance on them.\n                bad_mAPs (dict[int, float]): A dict contains bad\n                    samples's indices in dataset and model's\n                    performance on them.\n        \"\"\"\n\n        if eval_fn is None:\n            eval_fn = bbox_map_eval\n        else:\n            assert callable(eval_fn)\n\n        prog_bar = ProgressBar(len(results))\n        _mAPs = {}\n        data_info = {}\n        for i, (result, ) in enumerate(zip(results)):\n\n            # self.dataset[i] should not call directly\n            # because there is a risk of mismatch\n            data_info = dataset.prepare_data(i)\n            data_info['bboxes'] = data_info['gt_bboxes'].tensor\n            data_info['labels'] = data_info['gt_bboxes_labels']\n\n            pred = result['pred_instances']\n            pred_bboxes = pred['bboxes'].cpu().numpy()\n            pred_scores = pred['scores'].cpu().numpy()\n            pred_labels = pred['labels'].cpu().numpy()\n\n            dets = []\n            for label in range(len(dataset.metainfo['classes'])):\n                index = np.where(pred_labels == label)[0]\n                pred_bbox_scores = np.hstack(\n                    [pred_bboxes[index], pred_scores[index].reshape((-1, 1))])\n                dets.append(pred_bbox_scores)\n            mAP = eval_fn(dets, data_info)\n\n            _mAPs[i] = mAP\n            prog_bar.update()\n        # descending select topk image\n        _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))\n        good_mAPs = _mAPs[-topk:]\n        bad_mAPs = _mAPs[:topk]\n\n        return good_mAPs, bad_mAPs\n\n    def panoptic_evaluate(self, dataset, results, topk=20):\n        \"\"\"Evaluation for panoptic segmentation.\n\n        Args:\n            dataset (Dataset): A PyTorch dataset.\n            results (list): Panoptic segmentation results from test\n                results pkl file.\n            topk (int): Number of the highest topk and\n                lowest topk after evaluation index sorting. 
Default: 20.\n\n        Returns:\n            tuple: A tuple contains good samples and bad samples.\n                good_pqs (dict[int, float]): A dict contains good\n                    samples's indices in dataset and model's\n                    performance on them.\n                bad_pqs (dict[int, float]): A dict contains bad\n                    samples's indices in dataset and model's\n                    performance on them.\n        \"\"\"\n        pqs = {}\n        prog_bar = ProgressBar(len(results))\n\n        for i in range(len(results)):\n            data_sample = {}\n            for k in dataset[i].keys():\n                data_sample[k] = dataset[i][k]\n\n            for k in results[i].keys():\n                data_sample[k] = results[i][k]\n\n            self.evaluator.process([data_sample])\n            metrics = self.evaluator.evaluate(1)\n\n            pqs[i] = metrics['coco_panoptic/PQ']\n            prog_bar.update()\n\n        # descending select topk image\n        pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))\n        good_pqs = pqs[-topk:]\n        bad_pqs = pqs[:topk]\n\n        return good_pqs, bad_pqs\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='MMDet eval image prediction result for each')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument(\n        'prediction_path', help='prediction path where test pkl result')\n    parser.add_argument(\n        'show_dir', help='directory where painted images will be saved')\n    parser.add_argument('--show', action='store_true', help='show results')\n    parser.add_argument(\n        '--wait-time',\n        type=float,\n        default=0,\n        help='the interval of show (s), 0 is block')\n    parser.add_argument(\n        '--topk',\n        default=20,\n        type=int,\n        help='saved Number of the highest topk '\n        'and lowest topk after index sorting')\n    parser.add_argument(\n        '--show-score-thr',\n        type=float,\n        default=0,\n        help='score threshold (default: 0.)')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    check_file_exist(args.prediction_path)\n\n    cfg = Config.fromfile(args.config)\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    cfg.test_dataloader.dataset.test_mode = True\n\n    cfg.test_dataloader.pop('batch_size', 0)\n    if cfg.train_dataloader.dataset.type in ('MultiImageMixDataset',\n                                             'ClassBalancedDataset',\n                                             'RepeatDataset', 'ConcatDataset'):\n        cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(\n            cfg.train_dataloader.dataset.dataset.pipeline)\n    else:\n        cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(\n            cfg.train_dataloader.dataset.pipeline)\n    dataset = DATASETS.build(cfg.test_dataloader.dataset)\n    outputs = load(args.prediction_path)\n\n    cfg.work_dir = args.show_dir\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    result_visualizer = ResultVisualizer(args.show, args.wait_time,\n                                         args.show_score_thr, runner)\n    result_visualizer.evaluate_and_show(\n        dataset, outputs, topk=args.topk, show_dir=args.show_dir)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
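For reference, a small hypothetical example of the per-image structures that `bbox_map_eval` in `tools/analysis_tools/analyze_results.py` operates on: one `(n, 5)` array of `[x1, y1, x2, y2, score]` rows per class plus an annotation dict with `bboxes`/`labels`. Here they are fed directly to `mmdet.evaluation.eval_map` at a single IoU threshold; all boxes and labels are made up.

```python
import numpy as np

from mmdet.evaluation import eval_map

# Detections for a single image with two hypothetical classes:
# class 0 has one box with score 0.9, class 1 has none.
dets = [
    np.array([[10., 10., 50., 50., 0.9]]),
    np.zeros((0, 5)),
]
annotation = dict(
    bboxes=np.array([[12., 11., 48., 52.]]),  # ground-truth boxes (n, 4)
    labels=np.array([0]))                     # ground-truth labels (n, )

mean_ap, _ = eval_map([dets], [annotation], iou_thr=0.5, logger='silent')
print(f'mAP@0.5: {mean_ap:.3f}')
```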
  {
    "path": "tools/analysis_tools/benchmark.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\n\nfrom mmengine import MMLogger\nfrom mmengine.config import Config, DictAction\nfrom mmengine.dist import init_dist\nfrom mmengine.registry import init_default_scope\nfrom mmengine.utils import mkdir_or_exist\n\nfrom mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,\n                                   InferenceBenchmark)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='MMDet benchmark')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('--checkpoint', help='checkpoint file')\n    parser.add_argument(\n        '--task',\n        choices=['inference', 'dataloader', 'dataset'],\n        default='dataloader',\n        help='Which task do you want to go to benchmark')\n    parser.add_argument(\n        '--repeat-num',\n        type=int,\n        default=1,\n        help='number of repeat times of measurement for averaging the results')\n    parser.add_argument(\n        '--max-iter', type=int, default=2000, help='num of max iter')\n    parser.add_argument(\n        '--log-interval', type=int, default=50, help='interval of logging')\n    parser.add_argument(\n        '--num-warmup', type=int, default=5, help='Number of warmup')\n    parser.add_argument(\n        '--fuse-conv-bn',\n        action='store_true',\n        help='Whether to fuse conv and bn, this will slightly increase'\n        'the inference speed')\n    parser.add_argument(\n        '--dataset-type',\n        choices=['train', 'val', 'test'],\n        default='test',\n        help='Benchmark dataset type. only supports train, val and test')\n    parser.add_argument(\n        '--work-dir',\n        help='the directory to save the file containing '\n        'benchmark metrics')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    return args\n\n\ndef inference_benchmark(args, cfg, distributed, logger):\n    benchmark = InferenceBenchmark(\n        cfg,\n        args.checkpoint,\n        distributed,\n        args.fuse_conv_bn,\n        args.max_iter,\n        args.log_interval,\n        args.num_warmup,\n        logger=logger)\n    return benchmark\n\n\ndef dataloader_benchmark(args, cfg, distributed, logger):\n    benchmark = DataLoaderBenchmark(\n        cfg,\n        distributed,\n        args.dataset_type,\n        args.max_iter,\n        args.log_interval,\n        args.num_warmup,\n        logger=logger)\n    return benchmark\n\n\ndef dataset_benchmark(args, cfg, distributed, logger):\n    benchmark = DatasetBenchmark(\n        cfg,\n        args.dataset_type,\n        args.max_iter,\n        args.log_interval,\n        args.num_warmup,\n        logger=logger)\n    return benchmark\n\n\ndef main():\n    args = parse_args()\n    cfg = Config.fromfile(args.config)\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    distributed = False\n    if args.launcher != 'none':\n        init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))\n        distributed = True\n\n    log_file = None\n    if args.work_dir:\n        log_file = os.path.join(args.work_dir, 'benchmark.log')\n        mkdir_or_exist(args.work_dir)\n\n    logger = MMLogger.get_instance(\n        'mmdet', log_file=log_file, log_level='INFO')\n\n    benchmark = eval(f'{args.task}_benchmark')(args, cfg, distributed, logger)\n    benchmark.run(args.repeat_num)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
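A hypothetical invocation of the benchmark script above: the `dataloader` and `dataset` tasks need only a config, while the `inference` task additionally needs `--checkpoint`. The config path is a placeholder and the command assumes the repository root as working directory.

```python
import subprocess

# Measure raw dataloader throughput over 200 iterations of the train split.
subprocess.run([
    'python', 'tools/analysis_tools/benchmark.py',
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py',  # placeholder config
    '--task', 'dataloader',
    '--dataset-type', 'train',
    '--max-iter', '200',
    '--work-dir', 'work_dirs/benchmark',
], check=True)
```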
  {
    "path": "tools/analysis_tools/browse_dataset.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.registry import init_default_scope\nfrom mmengine.utils import ProgressBar\n\nfrom mmdet.models.utils import mask2ndarray\nfrom mmdet.registry import DATASETS, VISUALIZERS\nfrom mmdet.structures.bbox import BaseBoxes\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Browse a dataset')\n    parser.add_argument('config', help='train config file path')\n    parser.add_argument(\n        '--output-dir',\n        default=None,\n        type=str,\n        help='If there is no display interface, you can save it')\n    parser.add_argument('--not-show', default=False, action='store_true')\n    parser.add_argument(\n        '--show-interval',\n        type=float,\n        default=2,\n        help='the interval of show (s)')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    cfg = Config.fromfile(args.config)\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # register all modules in mmdet into the registries\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    dataset = DATASETS.build(cfg.train_dataloader.dataset)\n    visualizer = VISUALIZERS.build(cfg.visualizer)\n    visualizer.dataset_meta = dataset.metainfo\n\n    progress_bar = ProgressBar(len(dataset))\n    for item in dataset:\n        img = item['inputs'].permute(1, 2, 0).numpy()\n        data_sample = item['data_samples'].numpy()\n        gt_instances = data_sample.gt_instances\n        img_path = osp.basename(item['data_samples'].img_path)\n\n        out_file = osp.join(\n            args.output_dir,\n            osp.basename(img_path)) if args.output_dir is not None else None\n\n        img = img[..., [2, 1, 0]]  # bgr to rgb\n        gt_bboxes = gt_instances.get('bboxes', None)\n        if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):\n            gt_instances.bboxes = gt_bboxes.tensor\n        gt_masks = gt_instances.get('masks', None)\n        if gt_masks is not None:\n            masks = mask2ndarray(gt_masks)\n            gt_instances.masks = masks.astype(bool)\n        data_sample.gt_instances = gt_instances\n\n        visualizer.add_datasample(\n            osp.basename(img_path),\n            img,\n            data_sample,\n            draw_pred=False,\n            show=not args.not_show,\n            wait_time=args.show_interval,\n            out_file=out_file)\n\n        progress_bar.update()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/analysis_tools/coco_error_analysis.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os\nfrom argparse import ArgumentParser\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\n\ndef makeplot(rs, ps, outDir, class_name, iou_type):\n    cs = np.vstack([\n        np.ones((2, 3)),\n        np.array([0.31, 0.51, 0.74]),\n        np.array([0.75, 0.31, 0.30]),\n        np.array([0.36, 0.90, 0.38]),\n        np.array([0.50, 0.39, 0.64]),\n        np.array([1, 0.6, 0]),\n    ])\n    areaNames = ['allarea', 'small', 'medium', 'large']\n    types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']\n    for i in range(len(areaNames)):\n        area_ps = ps[..., i, 0]\n        figure_title = iou_type + '-' + class_name + '-' + areaNames[i]\n        aps = [ps_.mean() for ps_ in area_ps]\n        ps_curve = [\n            ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps\n        ]\n        ps_curve.insert(0, np.zeros(ps_curve[0].shape))\n        fig = plt.figure()\n        ax = plt.subplot(111)\n        for k in range(len(types)):\n            ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)\n            ax.fill_between(\n                rs,\n                ps_curve[k],\n                ps_curve[k + 1],\n                color=cs[k],\n                label=str(f'[{aps[k]:.3f}]' + types[k]),\n            )\n        plt.xlabel('recall')\n        plt.ylabel('precision')\n        plt.xlim(0, 1.0)\n        plt.ylim(0, 1.0)\n        plt.title(figure_title)\n        plt.legend()\n        # plt.show()\n        fig.savefig(outDir + f'/{figure_title}.png')\n        plt.close(fig)\n\n\ndef autolabel(ax, rects):\n    \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n    for rect in rects:\n        height = rect.get_height()\n        if height > 0 and height <= 1:  # for percent values\n            text_label = '{:2.0f}'.format(height * 100)\n        else:\n            text_label = '{:2.0f}'.format(height)\n        ax.annotate(\n            text_label,\n            xy=(rect.get_x() + rect.get_width() / 2, height),\n            xytext=(0, 3),  # 3 points vertical offset\n            textcoords='offset points',\n            ha='center',\n            va='bottom',\n            fontsize='x-small',\n        )\n\n\ndef makebarplot(rs, ps, outDir, class_name, iou_type):\n    areaNames = ['allarea', 'small', 'medium', 'large']\n    types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']\n    fig, ax = plt.subplots()\n    x = np.arange(len(areaNames))  # the areaNames locations\n    width = 0.60  # the width of the bars\n    rects_list = []\n    figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot'\n    for i in range(len(types) - 1):\n        type_ps = ps[i, ..., 0]\n        aps = [ps_.mean() for ps_ in type_ps.T]\n        rects_list.append(\n            ax.bar(\n                x - width / 2 + (i + 1) * width / len(types),\n                aps,\n                width / len(types),\n                label=types[i],\n            ))\n\n    # Add some text for labels, title and custom x-axis tick labels, etc.\n    ax.set_ylabel('Mean Average Precision (mAP)')\n    ax.set_title(figure_title)\n    ax.set_xticks(x)\n    ax.set_xticklabels(areaNames)\n    ax.legend()\n\n    # Add score texts over bars\n    for rects in rects_list:\n        autolabel(ax, rects)\n\n    # Save plot\n    fig.savefig(outDir + f'/{figure_title}.png')\n    
plt.close(fig)\n\n\ndef get_gt_area_group_numbers(cocoEval):\n    areaRng = cocoEval.params.areaRng\n    areaRngStr = [str(aRng) for aRng in areaRng]\n    areaRngLbl = cocoEval.params.areaRngLbl\n    areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl))\n    areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0)\n    for evalImg in cocoEval.evalImgs:\n        if evalImg:\n            for gtIgnore in evalImg['gtIgnore']:\n                if not gtIgnore:\n                    aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])]\n                    areaRngLbl2Number[aRngLbl] += 1\n    return areaRngLbl2Number\n\n\ndef make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True):\n    areaRngLbl2Number = get_gt_area_group_numbers(cocoEval)\n    areaRngLbl = areaRngLbl2Number.keys()\n    if verbose:\n        print('number of annotations per area group:', areaRngLbl2Number)\n\n    # Init figure\n    fig, ax = plt.subplots()\n    x = np.arange(len(areaRngLbl))  # the areaNames locations\n    width = 0.60  # the width of the bars\n    figure_title = 'number of annotations per area group'\n\n    rects = ax.bar(x, areaRngLbl2Number.values(), width)\n\n    # Add some text for labels, title and custom x-axis tick labels, etc.\n    ax.set_ylabel('Number of annotations')\n    ax.set_title(figure_title)\n    ax.set_xticks(x)\n    ax.set_xticklabels(areaRngLbl)\n\n    # Add score texts over bars\n    autolabel(ax, rects)\n\n    # Save plot\n    fig.tight_layout()\n    fig.savefig(outDir + f'/{figure_title}.png')\n    plt.close(fig)\n\n\ndef make_gt_area_histogram_plot(cocoEval, outDir):\n    n_bins = 100\n    areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()]\n\n    # init figure\n    figure_title = 'gt annotation areas histogram plot'\n    fig, ax = plt.subplots()\n\n    # Set the number of bins\n    ax.hist(np.sqrt(areas), bins=n_bins)\n\n    # Add some text for labels, title and custom x-axis tick labels, etc.\n    ax.set_xlabel('Squareroot Area')\n    ax.set_ylabel('Number of annotations')\n    ax.set_title(figure_title)\n\n    # Save plot\n    fig.tight_layout()\n    fig.savefig(outDir + f'/{figure_title}.png')\n    plt.close(fig)\n\n\ndef analyze_individual_category(k,\n                                cocoDt,\n                                cocoGt,\n                                catId,\n                                iou_type,\n                                areas=None):\n    nm = cocoGt.loadCats(catId)[0]\n    print(f'--------------analyzing {k + 1}-{nm[\"name\"]}---------------')\n    ps_ = {}\n    dt = copy.deepcopy(cocoDt)\n    nm = cocoGt.loadCats(catId)[0]\n    imgIds = cocoGt.getImgIds()\n    dt_anns = dt.dataset['annotations']\n    select_dt_anns = []\n    for ann in dt_anns:\n        if ann['category_id'] == catId:\n            select_dt_anns.append(ann)\n    dt.dataset['annotations'] = select_dt_anns\n    dt.createIndex()\n    # compute precision but ignore superclass confusion\n    gt = copy.deepcopy(cocoGt)\n    child_catIds = gt.getCatIds(supNms=[nm['supercategory']])\n    for idx, ann in enumerate(gt.dataset['annotations']):\n        if ann['category_id'] in child_catIds and ann['category_id'] != catId:\n            gt.dataset['annotations'][idx]['ignore'] = 1\n            gt.dataset['annotations'][idx]['iscrowd'] = 1\n            gt.dataset['annotations'][idx]['category_id'] = catId\n    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)\n    cocoEval.params.imgIds = imgIds\n    cocoEval.params.maxDets = [100]\n    cocoEval.params.iouThrs = [0.1]\n    
cocoEval.params.useCats = 1\n    if areas:\n        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],\n                                   [areas[0], areas[1]], [areas[1], areas[2]]]\n    cocoEval.evaluate()\n    cocoEval.accumulate()\n    ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]\n    ps_['ps_supercategory'] = ps_supercategory\n    # compute precision but ignore any class confusion\n    gt = copy.deepcopy(cocoGt)\n    for idx, ann in enumerate(gt.dataset['annotations']):\n        if ann['category_id'] != catId:\n            gt.dataset['annotations'][idx]['ignore'] = 1\n            gt.dataset['annotations'][idx]['iscrowd'] = 1\n            gt.dataset['annotations'][idx]['category_id'] = catId\n    cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)\n    cocoEval.params.imgIds = imgIds\n    cocoEval.params.maxDets = [100]\n    cocoEval.params.iouThrs = [0.1]\n    cocoEval.params.useCats = 1\n    if areas:\n        cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],\n                                   [areas[0], areas[1]], [areas[1], areas[2]]]\n    cocoEval.evaluate()\n    cocoEval.accumulate()\n    ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]\n    ps_['ps_allcategory'] = ps_allcategory\n    return k, ps_\n\n\ndef analyze_results(res_file,\n                    ann_file,\n                    res_types,\n                    out_dir,\n                    extraplots=None,\n                    areas=None):\n    for res_type in res_types:\n        assert res_type in ['bbox', 'segm']\n    if areas:\n        assert len(areas) == 3, '3 integers should be specified as areas, \\\n            representing 3 area regions'\n\n    directory = os.path.dirname(out_dir + '/')\n    if not os.path.exists(directory):\n        print(f'-------------create {out_dir}-----------------')\n        os.makedirs(directory)\n\n    cocoGt = COCO(ann_file)\n    cocoDt = cocoGt.loadRes(res_file)\n    imgIds = cocoGt.getImgIds()\n    for res_type in res_types:\n        res_out_dir = out_dir + '/' + res_type + '/'\n        res_directory = os.path.dirname(res_out_dir)\n        if not os.path.exists(res_directory):\n            print(f'-------------create {res_out_dir}-----------------')\n            os.makedirs(res_directory)\n        iou_type = res_type\n        cocoEval = COCOeval(\n            copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)\n        cocoEval.params.imgIds = imgIds\n        cocoEval.params.iouThrs = [0.75, 0.5, 0.1]\n        cocoEval.params.maxDets = [100]\n        if areas:\n            cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],\n                                       [areas[0], areas[1]],\n                                       [areas[1], areas[2]]]\n        cocoEval.evaluate()\n        cocoEval.accumulate()\n        ps = cocoEval.eval['precision']\n        ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))])\n        catIds = cocoGt.getCatIds()\n        recThrs = cocoEval.params.recThrs\n        with Pool(processes=48) as pool:\n            args = [(k, cocoDt, cocoGt, catId, iou_type, areas)\n                    for k, catId in enumerate(catIds)]\n            analyze_results = pool.starmap(analyze_individual_category, args)\n        for k, catId in enumerate(catIds):\n            nm = cocoGt.loadCats(catId)[0]\n            print(f'--------------saving {k + 1}-{nm[\"name\"]}---------------')\n            analyze_result = analyze_results[k]\n            assert k == analyze_result[0]\n            ps_supercategory = 
analyze_result[1]['ps_supercategory']\n            ps_allcategory = analyze_result[1]['ps_allcategory']\n            # compute precision but ignore superclass confusion\n            ps[3, :, k, :, :] = ps_supercategory\n            # compute precision but ignore any class confusion\n            ps[4, :, k, :, :] = ps_allcategory\n            # fill in background and false negative errors and plot\n            ps[ps == -1] = 0\n            ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0\n            ps[6, :, k, :, :] = 1.0\n            makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type)\n            if extraplots:\n                makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'],\n                            iou_type)\n        makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type)\n        if extraplots:\n            makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type)\n            make_gt_area_group_numbers_plot(\n                cocoEval=cocoEval, outDir=res_out_dir, verbose=True)\n            make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir)\n\n\ndef main():\n    parser = ArgumentParser(description='COCO Error Analysis Tool')\n    parser.add_argument('result', help='result file (json format) path')\n    parser.add_argument('out_dir', help='dir to save analyze result images')\n    parser.add_argument(\n        '--ann',\n        default='data/coco/annotations/instances_val2017.json',\n        help='annotation file path')\n    parser.add_argument(\n        '--types', type=str, nargs='+', default=['bbox'], help='result types')\n    parser.add_argument(\n        '--extraplots',\n        action='store_true',\n        help='export extra bar/stat plots')\n    parser.add_argument(\n        '--areas',\n        type=int,\n        nargs='+',\n        default=[1024, 9216, 10000000000],\n        help='area regions')\n    args = parser.parse_args()\n    analyze_results(\n        args.result,\n        args.ann,\n        args.types,\n        out_dir=args.out_dir,\n        extraplots=args.extraplots,\n        areas=args.areas)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
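A hypothetical sketch of the detection-result JSON that `coco_error_analysis.py` loads through `COCO.loadRes`: a flat list of detections, each with `image_id`, `category_id`, an `[x, y, width, height]` box and a `score`. The ids and values are placeholders, and the commented command assumes the default COCO val2017 annotation path.

```python
import json

# One fabricated detection in COCO result format (boxes are xywh, not xyxy).
detections = [
    dict(image_id=1, category_id=1, bbox=[50.0, 60.0, 120.0, 200.0], score=0.98),
]
with open('results.bbox.json', 'w') as f:
    json.dump(detections, f)

# python tools/analysis_tools/coco_error_analysis.py results.bbox.json out_dir \
#     --ann data/coco/annotations/instances_val2017.json --types bbox
```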
  {
    "path": "tools/analysis_tools/coco_occluded_separated_recall.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom argparse import ArgumentParser\n\nimport mmengine\nfrom mmengine.logging import print_log\n\nfrom mmdet.datasets import CocoDataset\nfrom mmdet.evaluation import CocoOccludedSeparatedMetric\n\n\ndef main():\n    parser = ArgumentParser(\n        description='Compute recall of COCO occluded and separated masks '\n        'presented in paper https://arxiv.org/abs/2210.10046.')\n    parser.add_argument('result', help='result file (pkl format) path')\n    parser.add_argument('--out', help='file path to save evaluation results')\n    parser.add_argument(\n        '--score-thr',\n        type=float,\n        default=0.3,\n        help='Score threshold for the recall calculation. Defaults to 0.3')\n    parser.add_argument(\n        '--iou-thr',\n        type=float,\n        default=0.75,\n        help='IoU threshold for the recall calculation. Defaults to 0.75.')\n    parser.add_argument(\n        '--ann',\n        default='data/coco/annotations/instances_val2017.json',\n        help='coco annotation file path')\n    args = parser.parse_args()\n\n    results = mmengine.load(args.result)\n    assert 'masks' in results[0]['pred_instances'], \\\n        'The results must be predicted by instance segmentation model.'\n    metric = CocoOccludedSeparatedMetric(\n        ann_file=args.ann, iou_thr=args.iou_thr, score_thr=args.score_thr)\n    metric.dataset_meta = CocoDataset.METAINFO\n    for datasample in results:\n        metric.process(data_batch=None, data_samples=[datasample])\n    metric_res = metric.compute_metrics(metric.results)\n    if args.out is not None:\n        mmengine.dump(metric_res, args.out)\n        print_log(f'Evaluation results have been saved to {args.out}.')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
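A hypothetical end-to-end flow for the recall script above: the `.pkl` result file is typically produced by dumping predictions from an instance-segmentation model with `tools/test.py --out`, then scored here. Config and checkpoint names are placeholders.

```python
import subprocess

# 1. Dump per-image predictions (must include masks) to a pickle file.
subprocess.run([
    'python', 'tools/test.py',
    'configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',  # placeholder config
    'checkpoints/mask_rcnn_r50_fpn_1x_coco.pth',       # placeholder checkpoint
    '--out', 'results.pkl',
], check=True)

# 2. Compute recall on COCO occluded and separated masks.
subprocess.run([
    'python', 'tools/analysis_tools/coco_occluded_separated_recall.py',
    'results.pkl', '--out', 'occluded_separated_recall.json',
], check=True)
```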
  {
    "path": "tools/analysis_tools/confusion_matrix.py",
    "content": "import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MultipleLocator\nfrom mmcv.ops import nms\nfrom mmengine import Config, DictAction\nfrom mmengine.fileio import load\nfrom mmengine.registry import init_default_scope\nfrom mmengine.utils import ProgressBar\n\nfrom mmdet.evaluation import bbox_overlaps\nfrom mmdet.registry import DATASETS\nfrom mmdet.utils import replace_cfg_vals, update_data_root\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Generate confusion matrix from detection results')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument(\n        'prediction_path', help='prediction path where test .pkl result')\n    parser.add_argument(\n        'save_dir', help='directory where confusion matrix will be saved')\n    parser.add_argument(\n        '--show', action='store_true', help='show confusion matrix')\n    parser.add_argument(\n        '--color-theme',\n        default='plasma',\n        help='theme of the matrix color map')\n    parser.add_argument(\n        '--score-thr',\n        type=float,\n        default=0.3,\n        help='score threshold to filter detection bboxes')\n    parser.add_argument(\n        '--tp-iou-thr',\n        type=float,\n        default=0.5,\n        help='IoU threshold to be considered as matched')\n    parser.add_argument(\n        '--nms-iou-thr',\n        type=float,\n        default=None,\n        help='nms IoU threshold, only applied when users want to change the'\n        'nms IoU threshold.')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    return args\n\n\ndef calculate_confusion_matrix(dataset,\n                               results,\n                               score_thr=0,\n                               nms_iou_thr=None,\n                               tp_iou_thr=0.5):\n    \"\"\"Calculate the confusion matrix.\n\n    Args:\n        dataset (Dataset): Test or val dataset.\n        results (list[ndarray]): A list of detection results in each image.\n        score_thr (float|optional): Score threshold to filter bboxes.\n            Default: 0.\n        nms_iou_thr (float|optional): nms IoU threshold, the detection results\n            have done nms in the detector, only applied when users want to\n            change the nms IoU threshold. 
Default: None.\n        tp_iou_thr (float|optional): IoU threshold to be considered as matched.\n            Default: 0.5.\n    \"\"\"\n    num_classes = len(dataset.metainfo['classes'])\n    confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])\n    assert len(dataset) == len(results)\n    prog_bar = ProgressBar(len(results))\n    for idx, per_img_res in enumerate(results):\n        res_bboxes = per_img_res['pred_instances']\n        gts = dataset.get_data_info(idx)['instances']\n        analyze_per_img_dets(confusion_matrix, gts, res_bboxes, score_thr,\n                             tp_iou_thr, nms_iou_thr)\n        prog_bar.update()\n    return confusion_matrix\n\n\ndef analyze_per_img_dets(confusion_matrix,\n                         gts,\n                         result,\n                         score_thr=0,\n                         tp_iou_thr=0.5,\n                         nms_iou_thr=None):\n    \"\"\"Analyze detection results on each image.\n\n    Args:\n        confusion_matrix (ndarray): The confusion matrix,\n            has shape (num_classes + 1, num_classes + 1).\n        gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4).\n        gt_labels (ndarray): Ground truth labels, has shape (num_gt).\n        result (ndarray): Detection results, has shape\n            (num_classes, num_bboxes, 5).\n        score_thr (float): Score threshold to filter bboxes.\n            Default: 0.\n        tp_iou_thr (float): IoU threshold to be considered as matched.\n            Default: 0.5.\n        nms_iou_thr (float|optional): nms IoU threshold, the detection results\n            have done nms in the detector, only applied when users want to\n            change the nms IoU threshold. Default: None.\n    \"\"\"\n    true_positives = np.zeros(len(gts))\n    gt_bboxes = []\n    gt_labels = []\n    for gt in gts:\n        gt_bboxes.append(gt['bbox'])\n        gt_labels.append(gt['bbox_label'])\n\n    gt_bboxes = np.array(gt_bboxes)\n    gt_labels = np.array(gt_labels)\n\n    unique_label = np.unique(result['labels'].numpy())\n\n    for det_label in unique_label:\n        mask = (result['labels'] == det_label)\n        det_bboxes = result['bboxes'][mask].numpy()\n        det_scores = result['scores'][mask].numpy()\n\n        if nms_iou_thr:\n            det_bboxes, _ = nms(\n                det_bboxes, det_scores, nms_iou_thr, score_threshold=score_thr)\n        ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)\n        for i, score in enumerate(det_scores):\n            det_match = 0\n            if score >= score_thr:\n                for j, gt_label in enumerate(gt_labels):\n                    if ious[i, j] >= tp_iou_thr:\n                        det_match += 1\n                        if gt_label == det_label:\n                            true_positives[j] += 1  # TP\n                        confusion_matrix[gt_label, det_label] += 1\n                if det_match == 0:  # BG FP\n                    confusion_matrix[-1, det_label] += 1\n    for num_tp, gt_label in zip(true_positives, gt_labels):\n        if num_tp == 0:  # FN\n            confusion_matrix[gt_label, -1] += 1\n\n\ndef plot_confusion_matrix(confusion_matrix,\n                          labels,\n                          save_dir=None,\n                          show=True,\n                          title='Normalized Confusion Matrix',\n                          color_theme='plasma'):\n    \"\"\"Draw confusion matrix with matplotlib.\n\n    Args:\n        confusion_matrix (ndarray): The confusion 
matrix.\n        labels (list[str]): List of class names.\n        save_dir (str|optional): If set, save the confusion matrix plot to the\n            given path. Default: None.\n        show (bool): Whether to show the plot. Default: True.\n        title (str): Title of the plot. Default: `Normalized Confusion Matrix`.\n        color_theme (str): Theme of the matrix color map. Default: `plasma`.\n    \"\"\"\n    # normalize the confusion matrix\n    per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]\n    confusion_matrix = \\\n        confusion_matrix.astype(np.float32) / per_label_sums * 100\n\n    num_classes = len(labels)\n    fig, ax = plt.subplots(\n        figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)\n    cmap = plt.get_cmap(color_theme)\n    im = ax.imshow(confusion_matrix, cmap=cmap)\n    plt.colorbar(mappable=im, ax=ax)\n\n    title_font = {'weight': 'bold', 'size': 12}\n    ax.set_title(title, fontdict=title_font)\n    label_font = {'size': 10}\n    plt.ylabel('Ground Truth Label', fontdict=label_font)\n    plt.xlabel('Prediction Label', fontdict=label_font)\n\n    # draw locator\n    xmajor_locator = MultipleLocator(1)\n    xminor_locator = MultipleLocator(0.5)\n    ax.xaxis.set_major_locator(xmajor_locator)\n    ax.xaxis.set_minor_locator(xminor_locator)\n    ymajor_locator = MultipleLocator(1)\n    yminor_locator = MultipleLocator(0.5)\n    ax.yaxis.set_major_locator(ymajor_locator)\n    ax.yaxis.set_minor_locator(yminor_locator)\n\n    # draw grid\n    ax.grid(True, which='minor', linestyle='-')\n\n    # draw label\n    ax.set_xticks(np.arange(num_classes))\n    ax.set_yticks(np.arange(num_classes))\n    ax.set_xticklabels(labels)\n    ax.set_yticklabels(labels)\n\n    ax.tick_params(\n        axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)\n    plt.setp(\n        ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')\n\n    # draw confution matrix value\n    for i in range(num_classes):\n        for j in range(num_classes):\n            ax.text(\n                j,\n                i,\n                '{}%'.format(\n                    int(confusion_matrix[\n                        i,\n                        j]) if not np.isnan(confusion_matrix[i, j]) else -1),\n                ha='center',\n                va='center',\n                color='w',\n                size=7)\n\n    ax.set_ylim(len(confusion_matrix) - 0.5, -0.5)  # matplotlib>3.1.1\n\n    fig.tight_layout()\n    if save_dir is not None:\n        plt.savefig(\n            os.path.join(save_dir, 'confusion_matrix.png'), format='png')\n    if show:\n        plt.show()\n\n\ndef main():\n    args = parse_args()\n\n    cfg = Config.fromfile(args.config)\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    results = load(args.prediction_path)\n\n    if not os.path.exists(args.save_dir):\n        os.makedirs(args.save_dir)\n\n    dataset = DATASETS.build(cfg.test_dataloader.dataset)\n\n    confusion_matrix = calculate_confusion_matrix(dataset, results,\n                                                  args.score_thr,\n                                                  args.nms_iou_thr,\n                                                  args.tp_iou_thr)\n    plot_confusion_matrix(\n        
confusion_matrix,\n        dataset.metainfo['classes'] + ('background', ),\n        save_dir=args.save_dir,\n        show=args.show,\n        color_theme=args.color_theme)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
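A small hypothetical example of the per-image inputs consumed by `analyze_per_img_dets` above: ground-truth `instances` entries with `bbox`/`bbox_label`, and predicted `bboxes`/`scores`/`labels` tensors. The `sys.path` tweak assumes the repository root as working directory; all boxes, scores and labels are made up.

```python
import sys

import numpy as np
import torch

sys.path.insert(0, 'tools/analysis_tools')  # assumes repo root as cwd
from confusion_matrix import analyze_per_img_dets

# Two hypothetical classes -> a (2 + 1) x (2 + 1) matrix; rows are ground-truth
# labels and columns predicted labels, the extra last row holds background
# false positives and the extra last column holds false negatives.
cm = np.zeros((3, 3))
gts = [dict(bbox=[10.0, 10.0, 60.0, 60.0], bbox_label=0)]
result = dict(
    bboxes=torch.tensor([[12.0, 11.0, 58.0, 62.0]]),
    scores=torch.tensor([0.9]),
    labels=torch.tensor([0]))
analyze_per_img_dets(cm, gts, result, score_thr=0.3, tp_iou_thr=0.5)
print(cm)  # cm[0, 0] == 1 -> the class-0 ground truth was matched
```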
  {
    "path": "tools/analysis_tools/eval_metric.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\n\nimport mmengine\nfrom mmengine import Config, DictAction\nfrom mmengine.evaluator import Evaluator\nfrom mmengine.registry import init_default_scope\n\nfrom mmdet.registry import DATASETS\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Evaluate metric of the '\n                                     'results saved in pkl format')\n    parser.add_argument('config', help='Config of the model')\n    parser.add_argument('pkl_results', help='Results in pickle format')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    cfg = Config.fromfile(args.config)\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    dataset = DATASETS.build(cfg.test_dataloader.dataset)\n    predictions = mmengine.load(args.pkl_results)\n\n    evaluator = Evaluator(cfg.val_evaluator)\n    evaluator.dataset_meta = dataset.metainfo\n    eval_results = evaluator.offline_evaluate(predictions)\n    print(eval_results)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/analysis_tools/get_flops.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport tempfile\nfrom functools import partial\nfrom pathlib import Path\n\nimport torch\nfrom mmengine.config import Config, DictAction\nfrom mmengine.logging import MMLogger\nfrom mmengine.model import revert_sync_batchnorm\nfrom mmengine.registry import init_default_scope\nfrom mmengine.runner import Runner\n\nfrom mmdet.registry import MODELS\n\ntry:\n    from mmengine.analysis import get_model_complexity_info\n    from mmengine.analysis.print_helper import _format_size\nexcept ImportError:\n    raise ImportError('Please upgrade mmengine >= 0.6.0')\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Get a detector flops')\n    parser.add_argument('config', help='train config file path')\n    parser.add_argument(\n        '--shape',\n        type=int,\n        nargs='+',\n        default=[1280, 800],\n        help='input image size')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    return args\n\n\ndef inference(args, logger):\n    if str(torch.__version__) < '1.12':\n        logger.warning(\n            'Some config files, such as configs/yolact and configs/detectors,'\n            'may have compatibility issues with torch.jit when torch<1.12. '\n            'If you want to calculate flops for these models, '\n            'please make sure your pytorch version is >=1.12.')\n\n    config_name = Path(args.config)\n    if not config_name.exists():\n        logger.error(f'{config_name} not found.')\n\n    cfg = Config.fromfile(args.config)\n    cfg.work_dir = tempfile.TemporaryDirectory().name\n    cfg.log_level = 'WARN'\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    # TODO: The following usage is temporary and not safe\n    # use hard code to convert mmSyncBN to SyncBN. This is a known\n    # bug in mmengine, mmSyncBN requires a distributed environment，\n    # this question involves models like configs/strong_baselines\n    if hasattr(cfg, 'head_norm_cfg'):\n        cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)\n        cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(\n            type='SyncBN', requires_grad=True)\n        cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(\n            type='SyncBN', requires_grad=True)\n\n    if len(args.shape) == 1:\n        h = w = args.shape[0]\n    elif len(args.shape) == 2:\n        h, w = args.shape\n    else:\n        raise ValueError('invalid input shape')\n    result = {}\n\n    # Supports two ways to calculate flops,\n    # 1. randomly generate a picture\n    # 2. 
load a picture from the dataset\n    # In two stage detectors, _forward need batch_samples to get\n    # rpn_results_list, then use rpn_results_list to compute flops,\n    # so only the second way is supported\n    try:\n        model = MODELS.build(cfg.model)\n        if torch.cuda.is_available():\n            model.cuda()\n        model = revert_sync_batchnorm(model)\n        data_batch = {'inputs': [torch.rand(3, h, w)], 'batch_samples': [None]}\n        data = model.data_preprocessor(data_batch)\n        result['ori_shape'] = (h, w)\n        result['pad_shape'] = data['inputs'].shape[-2:]\n        model.eval()\n        outputs = get_model_complexity_info(\n            model,\n            None,\n            inputs=data['inputs'],\n            show_table=False,\n            show_arch=False)\n        flops = outputs['flops']\n        params = outputs['params']\n        result['compute_type'] = 'direct: randomly generate a picture'\n\n    except TypeError:\n        logger.warning(\n            'Failed to directly get FLOPs, try to get flops with real data')\n        data_loader = Runner.build_dataloader(cfg.val_dataloader)\n        data_batch = next(iter(data_loader))\n        model = MODELS.build(cfg.model)\n        if torch.cuda.is_available():\n            model = model.cuda()\n        model = revert_sync_batchnorm(model)\n        model.eval()\n        _forward = model.forward\n        data = model.data_preprocessor(data_batch)\n        result['ori_shape'] = data['data_samples'][0].ori_shape\n        result['pad_shape'] = data['data_samples'][0].pad_shape\n\n        del data_loader\n        model.forward = partial(_forward, data_samples=data['data_samples'])\n        outputs = get_model_complexity_info(\n            model,\n            None,\n            inputs=data['inputs'],\n            show_table=False,\n            show_arch=False)\n        flops = outputs['flops']\n        params = outputs['params']\n        result['compute_type'] = 'dataloader: load a picture from the dataset'\n\n    flops = _format_size(flops)\n    params = _format_size(params)\n    result['flops'] = flops\n    result['params'] = params\n\n    return result\n\n\ndef main():\n    args = parse_args()\n    logger = MMLogger.get_instance(name='MMLogger')\n    result = inference(args, logger)\n    split_line = '=' * 30\n    ori_shape = result['ori_shape']\n    pad_shape = result['pad_shape']\n    flops = result['flops']\n    params = result['params']\n    compute_type = result['compute_type']\n\n    if pad_shape != ori_shape:\n        print(f'{split_line}\\nUse size divisor set input shape '\n              f'from {ori_shape} to {pad_shape}')\n    print(f'{split_line}\\nCompute type: {compute_type}\\n'\n          f'Input shape: {pad_shape}\\nFlops: {flops}\\n'\n          f'Params: {params}\\n{split_line}')\n    print('!!!Please be cautious if you use the results in papers. '\n          'You may need to check if all ops are supported and verify '\n          'that the flops computation is correct.')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/analysis_tools/optimize_anchors.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Optimize anchor settings on a specific dataset.\n\nThis script provides two method to optimize YOLO anchors including k-means\nanchor cluster and differential evolution. You can use ``--algorithm k-means``\nand ``--algorithm differential_evolution`` to switch two method.\n\nExample:\n    Use k-means anchor cluster::\n\n        python tools/analysis_tools/optimize_anchors.py ${CONFIG} \\\n        --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \\\n        --output-dir ${OUTPUT_DIR}\n    Use differential evolution to optimize anchors::\n\n        python tools/analysis_tools/optimize_anchors.py ${CONFIG} \\\n        --algorithm differential_evolution \\\n        --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \\\n        --output-dir ${OUTPUT_DIR}\n\"\"\"\nimport argparse\nimport os.path as osp\n\nimport numpy as np\nimport torch\nfrom mmengine.config import Config\nfrom mmengine.fileio import dump\nfrom mmengine.logging import MMLogger\nfrom mmengine.registry import init_default_scope\nfrom mmengine.utils import ProgressBar\nfrom scipy.optimize import differential_evolution\n\nfrom mmdet.registry import DATASETS\nfrom mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,\n                                   bbox_xyxy_to_cxcywh)\nfrom mmdet.utils import replace_cfg_vals, update_data_root\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Optimize anchor parameters.')\n    parser.add_argument('config', help='Train config file path.')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for calculating.')\n    parser.add_argument(\n        '--input-shape',\n        type=int,\n        nargs='+',\n        default=[608, 608],\n        help='input image size')\n    parser.add_argument(\n        '--algorithm',\n        default='differential_evolution',\n        help='Algorithm used for anchor optimizing.'\n        'Support k-means and differential_evolution for YOLO.')\n    parser.add_argument(\n        '--iters',\n        default=1000,\n        type=int,\n        help='Maximum iterations for optimizer.')\n    parser.add_argument(\n        '--output-dir',\n        default=None,\n        type=str,\n        help='Path to save anchor optimize result.')\n\n    args = parser.parse_args()\n    return args\n\n\nclass BaseAnchorOptimizer:\n    \"\"\"Base class for anchor optimizer.\n\n    Args:\n        dataset (obj:`Dataset`): Dataset object.\n        input_shape (list[int]): Input image shape of the model.\n            Format in [width, height].\n        logger (obj:`logging.Logger`): The logger for logging.\n        device (str, optional): Device used for calculating.\n            Default: 'cuda:0'\n        out_dir (str, optional): Path to save anchor optimize result.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 dataset,\n                 input_shape,\n                 logger,\n                 device='cuda:0',\n                 out_dir=None):\n        self.dataset = dataset\n        self.input_shape = input_shape\n        self.logger = logger\n        self.device = device\n        self.out_dir = out_dir\n        bbox_whs, img_shapes = self.get_whs_and_shapes()\n        ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])\n\n        # resize to input shape\n        self.bbox_whs = bbox_whs / ratios\n\n    def get_whs_and_shapes(self):\n        \"\"\"Get widths and heights of bboxes and shapes of images.\n\n  
      Returns:\n            tuple[np.ndarray]: Array of bbox shapes and array of image\n            shapes with shape (num_bboxes, 2) in [width, height] format.\n        \"\"\"\n        self.logger.info('Collecting bboxes from annotation...')\n        bbox_whs = []\n        img_shapes = []\n        prog_bar = ProgressBar(len(self.dataset))\n        for idx in range(len(self.dataset)):\n            data_info = self.dataset.get_data_info(idx)\n            img_shape = np.array([data_info['width'], data_info['height']])\n            gt_instances = data_info['instances']\n            for instance in gt_instances:\n                bbox = np.array(instance['bbox'])\n                wh = bbox[2:4] - bbox[0:2]\n                img_shapes.append(img_shape)\n                bbox_whs.append(wh)\n\n            prog_bar.update()\n        print('\\n')\n        bbox_whs = np.array(bbox_whs)\n        img_shapes = np.array(img_shapes)\n        self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')\n        return bbox_whs, img_shapes\n\n    def get_zero_center_bbox_tensor(self):\n        \"\"\"Get a tensor of bboxes centered at (0, 0).\n\n        Returns:\n            Tensor: Tensor of bboxes with shape (num_bboxes, 4)\n            in [xmin, ymin, xmax, ymax] format.\n        \"\"\"\n        whs = torch.from_numpy(self.bbox_whs).to(\n            self.device, dtype=torch.float32)\n        bboxes = bbox_cxcywh_to_xyxy(\n            torch.cat([torch.zeros_like(whs), whs], dim=1))\n        return bboxes\n\n    def optimize(self):\n        raise NotImplementedError\n\n    def save_result(self, anchors, path=None):\n        anchor_results = []\n        for w, h in anchors:\n            anchor_results.append([round(w), round(h)])\n        self.logger.info(f'Anchor optimize result:{anchor_results}')\n        if path:\n            json_path = osp.join(path, 'anchor_optimize_result.json')\n            dump(anchor_results, json_path)\n            self.logger.info(f'Result saved in {json_path}')\n\n\nclass YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):\n    r\"\"\"YOLO anchor optimizer using k-means. 
Code refers to `AlexeyAB/darknet.\n    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.\n\n    Args:\n        num_anchors (int): Number of anchors.\n        iters (int): Maximum iterations for k-means.\n    \"\"\"\n\n    def __init__(self, num_anchors, iters, **kwargs):\n\n        super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)\n        self.num_anchors = num_anchors\n        self.iters = iters\n\n    def optimize(self):\n        anchors = self.kmeans_anchors()\n        self.save_result(anchors, self.out_dir)\n\n    def kmeans_anchors(self):\n        self.logger.info(\n            f'Start cluster {self.num_anchors} YOLO anchors with K-means...')\n        bboxes = self.get_zero_center_bbox_tensor()\n        cluster_center_idx = torch.randint(\n            0, bboxes.shape[0], (self.num_anchors, )).to(self.device)\n\n        assignments = torch.zeros((bboxes.shape[0], )).to(self.device)\n        cluster_centers = bboxes[cluster_center_idx]\n        if self.num_anchors == 1:\n            cluster_centers = self.kmeans_maximization(bboxes, assignments,\n                                                       cluster_centers)\n            anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()\n            anchors = sorted(anchors, key=lambda x: x[0] * x[1])\n            return anchors\n\n        prog_bar = ProgressBar(self.iters)\n        for i in range(self.iters):\n            converged, assignments = self.kmeans_expectation(\n                bboxes, assignments, cluster_centers)\n            if converged:\n                self.logger.info(f'K-means process has converged at iter {i}.')\n                break\n            cluster_centers = self.kmeans_maximization(bboxes, assignments,\n                                                       cluster_centers)\n            prog_bar.update()\n        print('\\n')\n        avg_iou = bbox_overlaps(bboxes,\n                                cluster_centers).max(1)[0].mean().item()\n\n        anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()\n        anchors = sorted(anchors, key=lambda x: x[0] * x[1])\n        self.logger.info(f'Anchor clustering finished. Average IOU: {avg_iou}')\n\n        return anchors\n\n    def kmeans_maximization(self, bboxes, assignments, centers):\n        \"\"\"Maximization part of EM algorithm (Expectation-Maximization).\"\"\"\n        new_centers = torch.zeros_like(centers)\n        for i in range(centers.shape[0]):\n            mask = (assignments == i)\n            if mask.sum():\n                new_centers[i, :] = bboxes[mask].mean(0)\n        return new_centers\n\n    def kmeans_expectation(self, bboxes, assignments, centers):\n        \"\"\"Expectation part of EM algorithm (Expectation-Maximization).\"\"\"\n        ious = bbox_overlaps(bboxes, centers)\n        closest = ious.argmax(1)\n        converged = (closest == assignments).all()\n        return converged, closest\n\n\nclass YOLODEAnchorOptimizer(BaseAnchorOptimizer):\n    \"\"\"YOLO anchor optimizer using the differential evolution algorithm.\n\n    Args:\n        num_anchors (int): Number of anchors.\n        iters (int): Maximum iterations for differential evolution.\n        strategy (str): The differential evolution strategy to use.\n            Should be one of:\n\n                - 'best1bin'\n                - 'best1exp'\n                - 'rand1exp'\n                - 'randtobest1exp'\n                - 'currenttobest1exp'\n                - 'best2exp'\n                - 'rand2exp'\n                - 'randtobest1bin'\n                - 'currenttobest1bin'\n                - 'best2bin'\n                - 'rand2bin'\n                - 'rand1bin'\n\n            Default: 'best1bin'.\n        population_size (int): Total population size of evolution algorithm.\n            Default: 15.\n        convergence_thr (float): Tolerance for convergence: the\n            optimization stops when ``np.std(pop) <= abs(convergence_thr)\n            + convergence_thr * np.abs(np.mean(population_energies))``.\n            Default: 0.0001.\n        mutation (tuple[float]): Range of dithering, which randomly changes\n            the mutation constant. 
Default: (0.5, 1).\n        recombination (float): Recombination constant of crossover probability.\n            Default: 0.7.\n    \"\"\"\n\n    def __init__(self,\n                 num_anchors,\n                 iters,\n                 strategy='best1bin',\n                 population_size=15,\n                 convergence_thr=0.0001,\n                 mutation=(0.5, 1),\n                 recombination=0.7,\n                 **kwargs):\n\n        super(YOLODEAnchorOptimizer, self).__init__(**kwargs)\n\n        self.num_anchors = num_anchors\n        self.iters = iters\n        self.strategy = strategy\n        self.population_size = population_size\n        self.convergence_thr = convergence_thr\n        self.mutation = mutation\n        self.recombination = recombination\n\n    def optimize(self):\n        anchors = self.differential_evolution()\n        self.save_result(anchors, self.out_dir)\n\n    def differential_evolution(self):\n        bboxes = self.get_zero_center_bbox_tensor()\n\n        bounds = []\n        for i in range(self.num_anchors):\n            bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])\n\n        result = differential_evolution(\n            func=self.avg_iou_cost,\n            bounds=bounds,\n            args=(bboxes, ),\n            strategy=self.strategy,\n            maxiter=self.iters,\n            popsize=self.population_size,\n            tol=self.convergence_thr,\n            mutation=self.mutation,\n            recombination=self.recombination,\n            updating='immediate',\n            disp=True)\n        self.logger.info(\n            f'Anchor evolution finish. Average IOU: {1 - result.fun}')\n        anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]\n        anchors = sorted(anchors, key=lambda x: x[0] * x[1])\n        return anchors\n\n    @staticmethod\n    def avg_iou_cost(anchor_params, bboxes):\n        assert len(anchor_params) % 2 == 0\n        anchor_whs = torch.tensor(\n            [[w, h]\n             for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(\n                 bboxes.device, dtype=bboxes.dtype)\n        anchor_boxes = bbox_cxcywh_to_xyxy(\n            torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))\n        ious = bbox_overlaps(bboxes, anchor_boxes)\n        max_ious, _ = ious.max(1)\n        cost = 1 - max_ious.mean().item()\n        return cost\n\n\ndef main():\n    logger = MMLogger.get_current_instance()\n    args = parse_args()\n    cfg = args.config\n    cfg = Config.fromfile(cfg)\n    init_default_scope(cfg.get('default_scope', 'mmdet'))\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    input_shape = args.input_shape\n    assert len(input_shape) == 2\n\n    anchor_type = cfg.model.bbox_head.anchor_generator.type\n    assert anchor_type == 'YOLOAnchorGenerator', \\\n        f'Only support optimize YOLOAnchor, but get {anchor_type}.'\n\n    base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes\n    num_anchors = sum([len(sizes) for sizes in base_sizes])\n\n    train_data_cfg = cfg.train_dataloader\n    while 'dataset' in train_data_cfg:\n        train_data_cfg = train_data_cfg['dataset']\n    dataset = DATASETS.build(train_data_cfg)\n\n    if args.algorithm == 'k-means':\n        optimizer = YOLOKMeansAnchorOptimizer(\n            dataset=dataset,\n            input_shape=input_shape,\n            device=args.device,\n            
num_anchors=num_anchors,\n            iters=args.iters,\n            logger=logger,\n            out_dir=args.output_dir)\n    elif args.algorithm == 'differential_evolution':\n        optimizer = YOLODEAnchorOptimizer(\n            dataset=dataset,\n            input_shape=input_shape,\n            device=args.device,\n            num_anchors=num_anchors,\n            iters=args.iters,\n            logger=logger,\n            out_dir=args.output_dir)\n    else:\n        raise NotImplementedError(\n            f'Only k-means and differential_evolution are supported, '\n            f'but got {args.algorithm}')\n\n    optimizer.optimize()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/analysis_tools/robustness_eval.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom argparse import ArgumentParser\n\nimport numpy as np\nfrom mmengine.fileio import load\n\n\ndef print_coco_results(results):\n\n    def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):\n        titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n        typeStr = '(AP)' if ap == 1 else '(AR)'\n        iouStr = '0.50:0.95' \\\n            if iouThr is None else f'{iouThr:0.2f}'\n        iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '\n        iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'\n        print(iStr)\n\n    stats = np.zeros((12, ))\n    stats[0] = _print(results[0], 1)\n    stats[1] = _print(results[1], 1, iouThr=.5)\n    stats[2] = _print(results[2], 1, iouThr=.75)\n    stats[3] = _print(results[3], 1, areaRng='small')\n    stats[4] = _print(results[4], 1, areaRng='medium')\n    stats[5] = _print(results[5], 1, areaRng='large')\n    # TODO support recall metric\n    '''\n    stats[6] = _print(results[6], 0, maxDets=1)\n    stats[7] = _print(results[7], 0, maxDets=10)\n    stats[8] = _print(results[8], 0)\n    stats[9] = _print(results[9], 0, areaRng='small')\n    stats[10] = _print(results[10], 0, areaRng='medium')\n    stats[11] = _print(results[11], 0, areaRng='large')\n    '''\n\n\ndef get_coco_style_results(filename,\n                           task='bbox',\n                           metric=None,\n                           prints='mPC',\n                           aggregate='benchmark'):\n\n    assert aggregate in ['benchmark', 'all']\n\n    if prints == 'all':\n        prints = ['P', 'mPC', 'rPC']\n    elif isinstance(prints, str):\n        prints = [prints]\n    for p in prints:\n        assert p in ['P', 'mPC', 'rPC']\n\n    if metric is None:\n        metrics = [\n            'mAP',\n            'mAP_50',\n            'mAP_75',\n            'mAP_s',\n            'mAP_m',\n            'mAP_l',\n        ]\n    elif isinstance(metric, list):\n        metrics = metric\n    else:\n        metrics = [metric]\n\n    for metric_name in metrics:\n        assert metric_name in [\n            'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n        ]\n\n    eval_output = load(filename)\n\n    num_distortions = len(list(eval_output.keys()))\n    results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')\n\n    for corr_i, distortion in enumerate(eval_output):\n        for severity in eval_output[distortion]:\n            for metric_j, metric_name in enumerate(metrics):\n                metric_dict = eval_output[distortion][severity]\n\n                new_metric_dict = {}\n                for k, v in metric_dict.items():\n                    if '/' in k:\n                        new_metric_dict[k.split('/')[-1]] = v\n                mAP = new_metric_dict['_'.join((task, metric_name))]\n                results[corr_i, severity, metric_j] = mAP\n\n    P = results[0, 0, :]\n    if aggregate == 'benchmark':\n        mPC = np.mean(results[:15, 1:, :], axis=(0, 1))\n    else:\n        mPC = np.mean(results[:, 1:, :], axis=(0, 1))\n    rPC = mPC / P\n\n    print(f'\\nmodel: {osp.basename(filename)}')\n    if metric is None:\n        if 'P' in prints:\n            print(f'Performance on Clean Data [P] ({task})')\n            print_coco_results(P)\n        if 'mPC' in prints:\n            print(f'Mean Performance under Corruption [mPC] ({task})')\n            print_coco_results(mPC)\n        if 'rPC' in prints:\n          
  print(f'Relative Performance under Corruption [rPC] ({task})')\n            print_coco_results(rPC)\n    else:\n        if 'P' in prints:\n            print(f'Performance on Clean Data [P] ({task})')\n            for metric_i, metric_name in enumerate(metrics):\n                print(f'{metric_name:5} =  {P[metric_i]:0.3f}')\n        if 'mPC' in prints:\n            print(f'Mean Performance under Corruption [mPC] ({task})')\n            for metric_i, metric_name in enumerate(metrics):\n                print(f'{metric_name:5} =  {mPC[metric_i]:0.3f}')\n        if 'rPC' in prints:\n            print(f'Relative Performance under Corruption [rPC] ({task})')\n            for metric_i, metric_name in enumerate(metrics):\n                print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')\n\n    return results\n\n\ndef get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):\n\n    assert aggregate in ['benchmark', 'all']\n\n    if prints == 'all':\n        prints = ['P', 'mPC', 'rPC']\n    elif isinstance(prints, str):\n        prints = [prints]\n    for p in prints:\n        assert p in ['P', 'mPC', 'rPC']\n\n    eval_output = load(filename)\n\n    num_distortions = len(list(eval_output.keys()))\n    results = np.zeros((num_distortions, 6, 20), dtype='float32')\n\n    for i, distortion in enumerate(eval_output):\n        for severity in eval_output[distortion]:\n            mAP = [\n                eval_output[distortion][severity][j]['ap']\n                for j in range(len(eval_output[distortion][severity]))\n            ]\n            results[i, severity, :] = mAP\n\n    P = results[0, 0, :]\n    if aggregate == 'benchmark':\n        mPC = np.mean(results[:15, 1:, :], axis=(0, 1))\n    else:\n        mPC = np.mean(results[:, 1:, :], axis=(0, 1))\n    rPC = mPC / P\n\n    print(f'\\nmodel: {osp.basename(filename)}')\n    if 'P' in prints:\n        print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')\n    if 'mPC' in prints:\n        print('Mean Performance under Corruption [mPC] in AP50 = '\n              f'{np.mean(mPC):0.3f}')\n    if 'rPC' in prints:\n        print('Relative Performance under Corruption [rPC] in % = '\n              f'{np.mean(rPC) * 100:0.1f}')\n\n    return np.mean(results, axis=2, keepdims=True)\n\n\ndef get_results(filename,\n                dataset='coco',\n                task='bbox',\n                metric=None,\n                prints='mPC',\n                aggregate='benchmark'):\n    assert dataset in ['coco', 'voc', 'cityscapes']\n\n    if dataset in ['coco', 'cityscapes']:\n        results = get_coco_style_results(\n            filename,\n            task=task,\n            metric=metric,\n            prints=prints,\n            aggregate=aggregate)\n    elif dataset == 'voc':\n        if task != 'bbox':\n            print('Only bbox analysis is supported for Pascal VOC')\n            print('Will report bbox results\\n')\n        if metric not in [None, ['AP'], ['AP50']]:\n            print('Only the AP50 metric is supported for Pascal VOC')\n            print('Will report AP50 metric\\n')\n        results = get_voc_style_results(\n            filename, prints=prints, aggregate=aggregate)\n\n    return results\n\n\ndef get_distortions_from_file(filename):\n\n    eval_output = load(filename)\n\n    return get_distortions_from_results(eval_output)\n\n\ndef get_distortions_from_results(eval_output):\n    distortions = []\n    for i, distortion in enumerate(eval_output):\n        distortions.append(distortion.replace('_', ' 
'))\n    return distortions\n\n\ndef main():\n    parser = ArgumentParser(description='Corruption Result Analysis')\n    parser.add_argument('filename', help='result file path')\n    parser.add_argument(\n        '--dataset',\n        type=str,\n        choices=['coco', 'voc', 'cityscapes'],\n        default='coco',\n        help='dataset type')\n    parser.add_argument(\n        '--task',\n        type=str,\n        nargs='+',\n        choices=['bbox', 'segm'],\n        default=['bbox'],\n        help='task to report')\n    parser.add_argument(\n        '--metric',\n        nargs='+',\n        choices=[\n            None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',\n            'AR100', 'ARs', 'ARm', 'ARl'\n        ],\n        default=None,\n        help='metric to report')\n    parser.add_argument(\n        '--prints',\n        type=str,\n        nargs='+',\n        choices=['P', 'mPC', 'rPC'],\n        default='mPC',\n        help='corruption benchmark metric to print')\n    parser.add_argument(\n        '--aggregate',\n        type=str,\n        choices=['all', 'benchmark'],\n        default='benchmark',\n        help='aggregate all results or only those \\\n        for benchmark corruptions')\n\n    args = parser.parse_args()\n\n    for task in args.task:\n        get_results(\n            args.filename,\n            dataset=args.dataset,\n            task=task,\n            metric=args.metric,\n            prints=args.prints,\n            aggregate=args.aggregate)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/analysis_tools/test_robustness.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport copy\nimport os\nimport os.path as osp\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.dist import get_dist_info\nfrom mmengine.evaluator import DumpResults\nfrom mmengine.fileio import dump\nfrom mmengine.runner import Runner\n\nfrom mmdet.engine.hooks.utils import trigger_visualization_hook\nfrom mmdet.registry import RUNNERS\nfrom tools.analysis_tools.robustness_eval import get_results\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='MMDet test detector')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint', help='checkpoint file')\n    parser.add_argument(\n        '--out',\n        type=str,\n        help='dump predictions to a pickle file for offline evaluation')\n    parser.add_argument(\n        '--corruptions',\n        type=str,\n        nargs='+',\n        default='benchmark',\n        choices=[\n            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',\n            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',\n            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',\n            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',\n            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',\n            'spatter', 'saturate'\n        ],\n        help='corruptions')\n    parser.add_argument(\n        '--work-dir',\n        help='the directory to save the file containing evaluation metrics')\n    parser.add_argument(\n        '--severities',\n        type=int,\n        nargs='+',\n        default=[0, 1, 2, 3, 4, 5],\n        help='corruption severity levels')\n    parser.add_argument(\n        '--summaries',\n        type=bool,\n        default=False,\n        help='Print summaries for every corruption and severity')\n    parser.add_argument('--show', action='store_true', help='show results')\n    parser.add_argument(\n        '--show-dir', help='directory where painted images will be saved')\n    parser.add_argument(\n        '--wait-time', type=float, default=2, help='the interval of show (s)')\n    parser.add_argument('--seed', type=int, default=None, help='random seed')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    parser.add_argument(\n        '--final-prints',\n        type=str,\n        nargs='+',\n        choices=['P', 'mPC', 'rPC'],\n        default='mPC',\n        help='corruption benchmark metric to print at the end')\n    parser.add_argument(\n        '--final-prints-aggregate',\n        type=str,\n        choices=['all', 'benchmark'],\n        default='benchmark',\n        help='aggregate all results or only those for benchmark corruptions')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    assert args.out or args.show or args.show_dir, \\\n        ('Please specify at least one operation (save or show the results) '\n         'with the argument \"--out\", \"--show\" or \"show-dir\"')\n\n    # load config\n    cfg = Config.fromfile(args.config)\n    cfg.launcher = args.launcher\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # work_dir is determined in this priority: CLI > segment in file > filename\n    if args.work_dir is not None:\n        # update configs according to CLI args if args.work_dir is not None\n        cfg.work_dir = args.work_dir\n    elif cfg.get('work_dir', None) is None:\n        # use config filename as default work_dir if cfg.work_dir is None\n        cfg.work_dir = osp.join('./work_dirs',\n                                osp.splitext(osp.basename(args.config))[0])\n\n    cfg.model.backbone.init_cfg.type = None\n    cfg.test_dataloader.dataset.test_mode = True\n\n    cfg.load_from = args.checkpoint\n    if args.show or args.show_dir:\n        cfg = trigger_visualization_hook(cfg, args)\n\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    # add `DumpResults` dummy metric\n    if args.out is not None:\n        assert args.out.endswith(('.pkl', '.pickle')), \\\n            'The dump file must be a pkl file.'\n        runner.test_evaluator.metrics.append(\n            DumpResults(out_file_path=args.out))\n\n    if 'all' in args.corruptions:\n        corruptions = [\n            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',\n            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',\n            'brightness', 'contrast', 'elastic_transform', 'pixelate',\n            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',\n            'saturate'\n        ]\n    elif 'benchmark' in args.corruptions:\n        corruptions = [\n            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',\n            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',\n            'brightness', 'contrast', 'elastic_transform', 'pixelate',\n            'jpeg_compression'\n        ]\n    elif 'noise' in args.corruptions:\n        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']\n    elif 'blur' in args.corruptions:\n        corruptions = [\n            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'\n        ]\n    elif 'weather' in args.corruptions:\n        corruptions = ['snow', 'frost', 'fog', 'brightness']\n    elif 'digital' in args.corruptions:\n        corruptions = [\n            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'\n        ]\n    elif 'holdout' in args.corruptions:\n        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']\n    elif 'None' in args.corruptions:\n        corruptions = ['None']\n        args.severities = [0]\n    else:\n        corruptions = args.corruptions\n\n    aggregated_results = {}\n    for corr_i, 
corruption in enumerate(corruptions):\n        aggregated_results[corruption] = {}\n        for sev_i, corruption_severity in enumerate(args.severities):\n            # evaluate severity 0 (= no corruption) only once\n            if corr_i > 0 and corruption_severity == 0:\n                aggregated_results[corruption][0] = \\\n                    aggregated_results[corruptions[0]][0]\n                continue\n\n            test_loader_cfg = copy.deepcopy(cfg.test_dataloader)\n            # assign corruption and severity\n            if corruption_severity > 0:\n                corruption_trans = dict(\n                    type='Corrupt',\n                    corruption=corruption,\n                    severity=corruption_severity)\n                # TODO: hard coded \"1\", we assume that the first step is\n                # loading images, which needs to be fixed in the future\n                test_loader_cfg.dataset.pipeline.insert(1, corruption_trans)\n\n            test_loader = runner.build_dataloader(test_loader_cfg)\n\n            runner.test_loop.dataloader = test_loader\n            # set random seeds\n            if args.seed is not None:\n                runner.set_randomness(args.seed)\n\n            # print info\n            print(f'\\nTesting {corruption} at severity {corruption_severity}')\n\n            eval_results = runner.test()\n            if args.out:\n                eval_results_filename = (\n                    osp.splitext(args.out)[0] + '_results' +\n                    osp.splitext(args.out)[1])\n                aggregated_results[corruption][\n                    corruption_severity] = eval_results\n                dump(aggregated_results, eval_results_filename)\n\n    rank, _ = get_dist_info()\n    if rank == 0:\n        eval_results_filename = (\n            osp.splitext(args.out)[0] + '_results' + osp.splitext(args.out)[1])\n        # print final results\n        print('\\nAggregated results:')\n        prints = args.final_prints\n        aggregate = args.final_prints_aggregate\n\n        if cfg.dataset_type == 'VOCDataset':\n            get_results(\n                eval_results_filename,\n                dataset='voc',\n                prints=prints,\n                aggregate=aggregate)\n        else:\n            get_results(\n                eval_results_filename,\n                dataset='coco',\n                prints=prints,\n                aggregate=aggregate)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/dataset_converters/cityscapes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport os.path as osp\n\nimport cityscapesscripts.helpers.labels as CSLabels\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nfrom mmengine.fileio import dump\nfrom mmengine.utils import (Timer, mkdir_or_exist, track_parallel_progress,\n                            track_progress)\n\n\ndef collect_files(img_dir, gt_dir):\n    suffix = 'leftImg8bit.png'\n    files = []\n    for img_file in glob.glob(osp.join(img_dir, '**/*.png')):\n        assert img_file.endswith(suffix), img_file\n        inst_file = gt_dir + img_file[\n            len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png'\n        # Note that labelIds are not converted to trainId for seg map\n        segm_file = gt_dir + img_file[\n            len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'\n        files.append((img_file, inst_file, segm_file))\n    assert len(files), f'No images found in {img_dir}'\n    print(f'Loaded {len(files)} images from {img_dir}')\n\n    return files\n\n\ndef collect_annotations(files, nproc=1):\n    print('Loading annotation images')\n    if nproc > 1:\n        images = track_parallel_progress(load_img_info, files, nproc=nproc)\n    else:\n        images = track_progress(load_img_info, files)\n\n    return images\n\n\ndef load_img_info(files):\n    img_file, inst_file, segm_file = files\n    inst_img = mmcv.imread(inst_file, 'unchanged')\n    # ids < 24 are stuff labels (filtering them first is about 5% faster)\n    unique_inst_ids = np.unique(inst_img[inst_img >= 24])\n    anno_info = []\n    for inst_id in unique_inst_ids:\n        # For non-crowd annotations, inst_id // 1000 is the label_id\n        # Crowd annotations have <1000 instance ids\n        label_id = inst_id // 1000 if inst_id >= 1000 else inst_id\n        label = CSLabels.id2label[label_id]\n        if not label.hasInstances or label.ignoreInEval:\n            continue\n\n        category_id = label.id\n        iscrowd = int(inst_id < 1000)\n        mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')\n        mask_rle = maskUtils.encode(mask[:, :, None])[0]\n\n        area = maskUtils.area(mask_rle)\n        # convert to COCO style XYWH format\n        bbox = maskUtils.toBbox(mask_rle)\n\n        # for json encoding\n        mask_rle['counts'] = mask_rle['counts'].decode()\n\n        anno = dict(\n            iscrowd=iscrowd,\n            category_id=category_id,\n            bbox=bbox.tolist(),\n            area=area.tolist(),\n            segmentation=mask_rle)\n        anno_info.append(anno)\n    video_name = osp.basename(osp.dirname(img_file))\n    img_info = dict(\n        # remove img_prefix for filename\n        file_name=osp.join(video_name, osp.basename(img_file)),\n        height=inst_img.shape[0],\n        width=inst_img.shape[1],\n        anno_info=anno_info,\n        segm_file=osp.join(video_name, osp.basename(segm_file)))\n\n    return img_info\n\n\ndef cvt_annotations(image_infos, out_json_name):\n    out_json = dict()\n    img_id = 0\n    ann_id = 0\n    out_json['images'] = []\n    out_json['categories'] = []\n    out_json['annotations'] = []\n    for image_info in image_infos:\n        image_info['id'] = img_id\n        anno_infos = image_info.pop('anno_info')\n        out_json['images'].append(image_info)\n        for anno_info in anno_infos:\n            anno_info['image_id'] = img_id\n            anno_info['id'] = ann_id\n            out_json['annotations'].append(anno_info)\n      
      ann_id += 1\n        img_id += 1\n    for label in CSLabels.labels:\n        if label.hasInstances and not label.ignoreInEval:\n            cat = dict(id=label.id, name=label.name)\n            out_json['categories'].append(cat)\n\n    if len(out_json['annotations']) == 0:\n        out_json.pop('annotations')\n\n    dump(out_json, out_json_name)\n    return out_json\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert Cityscapes annotations to COCO format')\n    parser.add_argument('cityscapes_path', help='cityscapes data path')\n    parser.add_argument('--img-dir', default='leftImg8bit', type=str)\n    parser.add_argument('--gt-dir', default='gtFine', type=str)\n    parser.add_argument('-o', '--out-dir', help='output path')\n    parser.add_argument(\n        '--nproc', default=1, type=int, help='number of process')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    cityscapes_path = args.cityscapes_path\n    out_dir = args.out_dir if args.out_dir else cityscapes_path\n    mkdir_or_exist(out_dir)\n\n    img_dir = osp.join(cityscapes_path, args.img_dir)\n    gt_dir = osp.join(cityscapes_path, args.gt_dir)\n\n    set_name = dict(\n        train='instancesonly_filtered_gtFine_train.json',\n        val='instancesonly_filtered_gtFine_val.json',\n        test='instancesonly_filtered_gtFine_test.json')\n\n    for split, json_name in set_name.items():\n        print(f'Converting {split} into {json_name}')\n        with Timer(print_tmpl='It took {}s to convert Cityscapes annotation'):\n            files = collect_files(\n                osp.join(img_dir, split), osp.join(gt_dir, split))\n            image_infos = collect_annotations(files, nproc=args.nproc)\n            cvt_annotations(image_infos, osp.join(out_dir, json_name))\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/dataset_converters/images2coco.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\n\nfrom mmengine.fileio import dump, list_from_file\nfrom mmengine.utils import mkdir_or_exist, scandir, track_iter_progress\nfrom PIL import Image\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert images to coco format without annotations')\n    parser.add_argument('img_path', help='The root path of images')\n    parser.add_argument(\n        'classes', type=str, help='The text file name of storage class list')\n    parser.add_argument(\n        'out',\n        type=str,\n        help='The output annotation json file name, The save dir is in the '\n        'same directory as img_path')\n    parser.add_argument(\n        '-e',\n        '--exclude-extensions',\n        type=str,\n        nargs='+',\n        help='The suffix of images to be excluded, such as \"png\" and \"bmp\"')\n    args = parser.parse_args()\n    return args\n\n\ndef collect_image_infos(path, exclude_extensions=None):\n    img_infos = []\n\n    images_generator = scandir(path, recursive=True)\n    for image_path in track_iter_progress(list(images_generator)):\n        if exclude_extensions is None or (\n                exclude_extensions is not None\n                and not image_path.lower().endswith(exclude_extensions)):\n            image_path = os.path.join(path, image_path)\n            img_pillow = Image.open(image_path)\n            img_info = {\n                'filename': image_path,\n                'width': img_pillow.width,\n                'height': img_pillow.height,\n            }\n            img_infos.append(img_info)\n    return img_infos\n\n\ndef cvt_to_coco_json(img_infos, classes):\n    image_id = 0\n    coco = dict()\n    coco['images'] = []\n    coco['type'] = 'instance'\n    coco['categories'] = []\n    coco['annotations'] = []\n    image_set = set()\n\n    for category_id, name in enumerate(classes):\n        category_item = dict()\n        category_item['supercategory'] = str('none')\n        category_item['id'] = int(category_id)\n        category_item['name'] = str(name)\n        coco['categories'].append(category_item)\n\n    for img_dict in img_infos:\n        file_name = img_dict['filename']\n        assert file_name not in image_set\n        image_item = dict()\n        image_item['id'] = int(image_id)\n        image_item['file_name'] = str(file_name)\n        image_item['height'] = int(img_dict['height'])\n        image_item['width'] = int(img_dict['width'])\n        coco['images'].append(image_item)\n        image_set.add(file_name)\n\n        image_id += 1\n    return coco\n\n\ndef main():\n    args = parse_args()\n    assert args.out.endswith(\n        'json'), 'The output file name must be json suffix'\n\n    # 1 load image list info\n    img_infos = collect_image_infos(args.img_path, args.exclude_extensions)\n\n    # 2 convert to coco format data\n    classes = list_from_file(args.classes)\n    coco_info = cvt_to_coco_json(img_infos, classes)\n\n    # 3 dump\n    save_dir = os.path.join(args.img_path, '..', 'annotations')\n    mkdir_or_exist(save_dir)\n    save_path = os.path.join(save_dir, args.out)\n    dump(coco_info, save_path)\n    print(f'save json file: {save_path}')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/dataset_converters/pascal_voc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nfrom mmengine.fileio import dump, list_from_file\nfrom mmengine.utils import mkdir_or_exist, track_progress\n\nfrom mmdet.evaluation import voc_classes\n\nlabel_ids = {name: i for i, name in enumerate(voc_classes())}\n\n\ndef parse_xml(args):\n    xml_path, img_path = args\n    tree = ET.parse(xml_path)\n    root = tree.getroot()\n    size = root.find('size')\n    w = int(size.find('width').text)\n    h = int(size.find('height').text)\n    bboxes = []\n    labels = []\n    bboxes_ignore = []\n    labels_ignore = []\n    for obj in root.findall('object'):\n        name = obj.find('name').text\n        label = label_ids[name]\n        difficult = int(obj.find('difficult').text)\n        bnd_box = obj.find('bndbox')\n        bbox = [\n            int(bnd_box.find('xmin').text),\n            int(bnd_box.find('ymin').text),\n            int(bnd_box.find('xmax').text),\n            int(bnd_box.find('ymax').text)\n        ]\n        if difficult:\n            bboxes_ignore.append(bbox)\n            labels_ignore.append(label)\n        else:\n            bboxes.append(bbox)\n            labels.append(label)\n    if not bboxes:\n        bboxes = np.zeros((0, 4))\n        labels = np.zeros((0, ))\n    else:\n        bboxes = np.array(bboxes, ndmin=2) - 1\n        labels = np.array(labels)\n    if not bboxes_ignore:\n        bboxes_ignore = np.zeros((0, 4))\n        labels_ignore = np.zeros((0, ))\n    else:\n        bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1\n        labels_ignore = np.array(labels_ignore)\n    annotation = {\n        'filename': img_path,\n        'width': w,\n        'height': h,\n        'ann': {\n            'bboxes': bboxes.astype(np.float32),\n            'labels': labels.astype(np.int64),\n            'bboxes_ignore': bboxes_ignore.astype(np.float32),\n            'labels_ignore': labels_ignore.astype(np.int64)\n        }\n    }\n    return annotation\n\n\ndef cvt_annotations(devkit_path, years, split, out_file):\n    if not isinstance(years, list):\n        years = [years]\n    annotations = []\n    for year in years:\n        filelist = osp.join(devkit_path,\n                            f'VOC{year}/ImageSets/Main/{split}.txt')\n        if not osp.isfile(filelist):\n            print(f'filelist does not exist: {filelist}, '\n                  f'skip voc{year} {split}')\n            return\n        img_names = list_from_file(filelist)\n        xml_paths = [\n            osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')\n            for img_name in img_names\n        ]\n        img_paths = [\n            f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names\n        ]\n        part_annotations = track_progress(parse_xml,\n                                          list(zip(xml_paths, img_paths)))\n        annotations.extend(part_annotations)\n    if out_file.endswith('json'):\n        annotations = cvt_to_coco_json(annotations)\n    dump(annotations, out_file)\n    return annotations\n\n\ndef cvt_to_coco_json(annotations):\n    image_id = 0\n    annotation_id = 0\n    coco = dict()\n    coco['images'] = []\n    coco['type'] = 'instance'\n    coco['categories'] = []\n    coco['annotations'] = []\n    image_set = set()\n\n    def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag):\n        annotation_item = dict()\n        annotation_item['segmentation'] = []\n\n     
   seg = []\n        # bbox[] is x1,y1,x2,y2\n        # left_top\n        seg.append(int(bbox[0]))\n        seg.append(int(bbox[1]))\n        # left_bottom\n        seg.append(int(bbox[0]))\n        seg.append(int(bbox[3]))\n        # right_bottom\n        seg.append(int(bbox[2]))\n        seg.append(int(bbox[3]))\n        # right_top\n        seg.append(int(bbox[2]))\n        seg.append(int(bbox[1]))\n\n        annotation_item['segmentation'].append(seg)\n\n        xywh = np.array(\n            [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]])\n        annotation_item['area'] = int(xywh[2] * xywh[3])\n        if difficult_flag == 1:\n            annotation_item['ignore'] = 0\n            annotation_item['iscrowd'] = 1\n        else:\n            annotation_item['ignore'] = 0\n            annotation_item['iscrowd'] = 0\n        annotation_item['image_id'] = int(image_id)\n        annotation_item['bbox'] = xywh.astype(int).tolist()\n        annotation_item['category_id'] = int(category_id)\n        annotation_item['id'] = int(annotation_id)\n        coco['annotations'].append(annotation_item)\n        return annotation_id + 1\n\n    for category_id, name in enumerate(voc_classes()):\n        category_item = dict()\n        category_item['supercategory'] = str('none')\n        category_item['id'] = int(category_id)\n        category_item['name'] = str(name)\n        coco['categories'].append(category_item)\n\n    for ann_dict in annotations:\n        file_name = ann_dict['filename']\n        ann = ann_dict['ann']\n        assert file_name not in image_set\n        image_item = dict()\n        image_item['id'] = int(image_id)\n        image_item['file_name'] = str(file_name)\n        image_item['height'] = int(ann_dict['height'])\n        image_item['width'] = int(ann_dict['width'])\n        coco['images'].append(image_item)\n        image_set.add(file_name)\n\n        bboxes = ann['bboxes'][:, :4]\n        labels = ann['labels']\n        for bbox_id in range(len(bboxes)):\n            bbox = bboxes[bbox_id]\n            label = labels[bbox_id]\n            annotation_id = addAnnItem(\n                annotation_id, image_id, label, bbox, difficult_flag=0)\n\n        bboxes_ignore = ann['bboxes_ignore'][:, :4]\n        labels_ignore = ann['labels_ignore']\n        for bbox_id in range(len(bboxes_ignore)):\n            bbox = bboxes_ignore[bbox_id]\n            label = labels_ignore[bbox_id]\n            annotation_id = addAnnItem(\n                annotation_id, image_id, label, bbox, difficult_flag=1)\n\n        image_id += 1\n\n    return coco\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert PASCAL VOC annotations to mmdetection format')\n    parser.add_argument('devkit_path', help='pascal voc devkit path')\n    parser.add_argument('-o', '--out-dir', help='output path')\n    parser.add_argument(\n        '--out-format',\n        default='pkl',\n        choices=('pkl', 'coco'),\n        help='output format, \"coco\" indicates coco annotation format')\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    devkit_path = args.devkit_path\n    out_dir = args.out_dir if args.out_dir else devkit_path\n    mkdir_or_exist(out_dir)\n\n    years = []\n    if osp.isdir(osp.join(devkit_path, 'VOC2007')):\n        years.append('2007')\n    if osp.isdir(osp.join(devkit_path, 'VOC2012')):\n        years.append('2012')\n    if '2007' in years and '2012' in years:\n        years.append(['2007', '2012'])\n    if not 
years:\n        raise IOError(f'The devkit path {devkit_path} contains neither '\n                      '\"VOC2007\" nor \"VOC2012\" subfolder')\n    out_fmt = f'.{args.out_format}'\n    if args.out_format == 'coco':\n        out_fmt = '.json'\n    for year in years:\n        if year == '2007':\n            prefix = 'voc07'\n        elif year == '2012':\n            prefix = 'voc12'\n        elif year == ['2007', '2012']:\n            prefix = 'voc0712'\n        for split in ['train', 'val', 'trainval']:\n            dataset_name = prefix + '_' + split\n            print(f'processing {dataset_name} ...')\n            cvt_annotations(devkit_path, year, split,\n                            osp.join(out_dir, dataset_name + out_fmt))\n        if not isinstance(year, list):\n            dataset_name = prefix + '_test'\n            print(f'processing {dataset_name} ...')\n            cvt_annotations(devkit_path, year, 'test',\n                            osp.join(out_dir, dataset_name + out_fmt))\n    print('Done!')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/deployment/mmdet2torchserve.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom mmengine.config import Config\nfrom mmengine.utils import mkdir_or_exist\n\ntry:\n    from model_archiver.model_packaging import package_model\n    from model_archiver.model_packaging_utils import ModelExportUtils\nexcept ImportError:\n    package_model = None\n\n\ndef mmdet2torchserve(\n    config_file: str,\n    checkpoint_file: str,\n    output_folder: str,\n    model_name: str,\n    model_version: str = '1.0',\n    force: bool = False,\n):\n    \"\"\"Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.\n\n    Args:\n        config_file:\n            In MMDetection config format.\n            The contents vary for each task repository.\n        checkpoint_file:\n            In MMDetection checkpoint format.\n            The contents vary for each task repository.\n        output_folder:\n            Folder where `{model_name}.mar` will be created.\n            The file created will be in TorchServe archive format.\n        model_name:\n            If not None, used for naming the `{model_name}.mar` file\n            that will be created under `output_folder`.\n            If None, `{Path(checkpoint_file).stem}` will be used.\n        model_version:\n            Model's version.\n        force:\n            If True, if there is an existing `{model_name}.mar`\n            file under `output_folder` it will be overwritten.\n    \"\"\"\n    mkdir_or_exist(output_folder)\n\n    config = Config.fromfile(config_file)\n\n    with TemporaryDirectory() as tmpdir:\n        config.dump(f'{tmpdir}/config.py')\n\n        args = Namespace(\n            **{\n                'model_file': f'{tmpdir}/config.py',\n                'serialized_file': checkpoint_file,\n                'handler': f'{Path(__file__).parent}/mmdet_handler.py',\n                'model_name': model_name or Path(checkpoint_file).stem,\n                'version': model_version,\n                'export_path': output_folder,\n                'force': force,\n                'requirements_file': None,\n                'extra_files': None,\n                'runtime': 'python',\n                'archive_format': 'default'\n            })\n        manifest = ModelExportUtils.generate_manifest_json(args)\n        package_model(args, manifest)\n\n\ndef parse_args():\n    parser = ArgumentParser(\n        description='Convert MMDetection models to TorchServe `.mar` format.')\n    parser.add_argument('config', type=str, help='config file path')\n    parser.add_argument('checkpoint', type=str, help='checkpoint file path')\n    parser.add_argument(\n        '--output-folder',\n        type=str,\n        required=True,\n        help='Folder where `{model_name}.mar` will be created.')\n    parser.add_argument(\n        '--model-name',\n        type=str,\n        default=None,\n        help='If not None, used for naming the `{model_name}.mar`'\n        'file that will be created under `output_folder`.'\n        'If None, `{Path(checkpoint_file).stem}` will be used.')\n    parser.add_argument(\n        '--model-version',\n        type=str,\n        default='1.0',\n        help='Number used for versioning.')\n    parser.add_argument(\n        '-f',\n        '--force',\n        action='store_true',\n        help='overwrite the existing `{model_name}.mar`')\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == '__main__':\n    args 
= parse_args()\n\n    if package_model is None:\n        raise ImportError('`torch-model-archiver` is required. '\n                          'Try: pip install torch-model-archiver')\n\n    mmdet2torchserve(args.config, args.checkpoint, args.output_folder,\n                     args.model_name, args.model_version, args.force)\n"
  },
  {
    "path": "tools/deployment/mmdet_handler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport base64\nimport os\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom ts.torch_handler.base_handler import BaseHandler\n\nfrom mmdet.apis import inference_detector, init_detector\n\n\nclass MMdetHandler(BaseHandler):\n    threshold = 0.5\n\n    def initialize(self, context):\n        properties = context.system_properties\n        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.device = torch.device(self.map_location + ':' +\n                                   str(properties.get('gpu_id')) if torch.cuda.\n                                   is_available() else self.map_location)\n        self.manifest = context.manifest\n\n        model_dir = properties.get('model_dir')\n        serialized_file = self.manifest['model']['serializedFile']\n        checkpoint = os.path.join(model_dir, serialized_file)\n        self.config_file = os.path.join(model_dir, 'config.py')\n\n        self.model = init_detector(self.config_file, checkpoint, self.device)\n        self.initialized = True\n\n    def preprocess(self, data):\n        images = []\n\n        for row in data:\n            image = row.get('data') or row.get('body')\n            if isinstance(image, str):\n                image = base64.b64decode(image)\n            image = mmcv.imfrombytes(image)\n            images.append(image)\n\n        return images\n\n    def inference(self, data, *args, **kwargs):\n        results = inference_detector(self.model, data)\n        return results\n\n    def postprocess(self, data):\n        # Format output following the example ObjectDetectionHandler format\n        output = []\n        for data_sample in data:\n            pred_instances = data_sample.pred_instances\n            bboxes = pred_instances.bboxes.cpu().numpy().astype(\n                np.float32).tolist()\n            labels = pred_instances.labels.cpu().numpy().astype(\n                np.int32).tolist()\n            scores = pred_instances.scores.cpu().numpy().astype(\n                np.float32).tolist()\n            preds = []\n            for idx in range(len(labels)):\n                cls_score, bbox, cls_label = scores[idx], bboxes[idx], labels[\n                    idx]\n                if cls_score >= self.threshold:\n                    class_name = self.model.dataset_meta['classes'][cls_label]\n                    result = dict(\n                        class_label=cls_label,\n                        class_name=class_name,\n                        bbox=bbox,\n                        score=cls_score)\n                    preds.append(result)\n            output.append(preds)\n        return output\n"
  },
  {
    "path": "tools/deployment/test_torchserver.py",
    "content": "import os\nfrom argparse import ArgumentParser\n\nimport mmcv\nimport requests\nimport torch\nfrom mmengine.structures import InstanceData\n\nfrom mmdet.apis import inference_detector, init_detector\nfrom mmdet.registry import VISUALIZERS\nfrom mmdet.structures import DetDataSample\n\n\ndef parse_args():\n    parser = ArgumentParser()\n    parser.add_argument('img', help='Image file')\n    parser.add_argument('config', help='Config file')\n    parser.add_argument('checkpoint', help='Checkpoint file')\n    parser.add_argument('model_name', help='The model name in the server')\n    parser.add_argument(\n        '--inference-addr',\n        default='127.0.0.1:8080',\n        help='Address and port of the inference server')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    parser.add_argument(\n        '--score-thr', type=float, default=0.5, help='bbox score threshold')\n    parser.add_argument(\n        '--work-dir',\n        type=str,\n        default=None,\n        help='output directory to save drawn results.')\n    args = parser.parse_args()\n    return args\n\n\ndef align_ts_output(inputs, metainfo, device):\n    bboxes = []\n    labels = []\n    scores = []\n    for i, pred in enumerate(inputs):\n        bboxes.append(pred['bbox'])\n        labels.append(pred['class_label'])\n        scores.append(pred['score'])\n    pred_instances = InstanceData(metainfo=metainfo)\n    pred_instances.bboxes = torch.tensor(\n        bboxes, dtype=torch.float32, device=device)\n    pred_instances.labels = torch.tensor(\n        labels, dtype=torch.int64, device=device)\n    pred_instances.scores = torch.tensor(\n        scores, dtype=torch.float32, device=device)\n    ts_data_sample = DetDataSample(pred_instances=pred_instances)\n    return ts_data_sample\n\n\ndef main(args):\n    # build the model from a config file and a checkpoint file\n    model = init_detector(args.config, args.checkpoint, device=args.device)\n    # test a single image\n    pytorch_results = inference_detector(model, args.img)\n    keep = pytorch_results.pred_instances.scores >= args.score_thr\n    pytorch_results.pred_instances = pytorch_results.pred_instances[keep]\n\n    # init visualizer\n    visualizer = VISUALIZERS.build(model.cfg.visualizer)\n    # the dataset_meta is loaded from the checkpoint and\n    # then pass to the model in init_detector\n    visualizer.dataset_meta = model.dataset_meta\n\n    # show the results\n    img = mmcv.imread(args.img)\n    img = mmcv.imconvert(img, 'bgr', 'rgb')\n    pt_out_file = None\n    ts_out_file = None\n    if args.work_dir is not None:\n        os.makedirs(args.work_dir, exist_ok=True)\n        pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')\n        ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')\n    visualizer.add_datasample(\n        'pytorch_result',\n        img.copy(),\n        data_sample=pytorch_results,\n        draw_gt=False,\n        out_file=pt_out_file,\n        show=True,\n        wait_time=0)\n\n    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name\n    with open(args.img, 'rb') as image:\n        response = requests.post(url, image)\n    metainfo = pytorch_results.pred_instances.metainfo\n    ts_results = align_ts_output(response.json(), metainfo, args.device)\n\n    visualizer.add_datasample(\n        'torchserve_result',\n        img,\n        data_sample=ts_results,\n        draw_gt=False,\n        out_file=ts_out_file,\n        
show=True,\n        wait_time=0)\n\n    assert torch.allclose(pytorch_results.pred_instances.bboxes,\n                          ts_results.pred_instances.bboxes)\n    assert torch.allclose(pytorch_results.pred_instances.labels,\n                          ts_results.pred_instances.labels)\n    assert torch.allclose(pytorch_results.pred_instances.scores,\n                          ts_results.pred_instances.scores)\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    main(args)\n"
  },
  {
    "path": "tools/dist_test.sh",
    "content": "#!/usr/bin/env bash\n\nCONFIG=$1\nCHECKPOINT=$2\nGPUS=$3\nNNODES=${NNODES:-1}\nNODE_RANK=${NODE_RANK:-0}\nPORT=${PORT:-29500}\nMASTER_ADDR=${MASTER_ADDR:-\"127.0.0.1\"}\n\nPYTHONPATH=\"$(dirname $0)/..\":$PYTHONPATH \\\npython -m torch.distributed.launch \\\n    --nnodes=$NNODES \\\n    --node_rank=$NODE_RANK \\\n    --master_addr=$MASTER_ADDR \\\n    --nproc_per_node=$GPUS \\\n    --master_port=$PORT \\\n    $(dirname \"$0\")/test.py \\\n    $CONFIG \\\n    $CHECKPOINT \\\n    --launcher pytorch \\\n    ${@:4}\n"
  },
  {
    "path": "tools/dist_train.sh",
    "content": "#!/usr/bin/env bash\n\nCONFIG=$1\nGPUS=$2\nNNODES=${NNODES:-1}\nNODE_RANK=${NODE_RANK:-0}\nPORT=${PORT:-29500}\nMASTER_ADDR=${MASTER_ADDR:-\"127.0.0.1\"}\n\nPYTHONPATH=\"$(dirname $0)/..\":$PYTHONPATH \\\npython -m torch.distributed.launch \\\n    --nnodes=$NNODES \\\n    --node_rank=$NODE_RANK \\\n    --master_addr=$MASTER_ADDR \\\n    --nproc_per_node=$GPUS \\\n    --master_port=$PORT \\\n    $(dirname \"$0\")/train.py \\\n    $CONFIG \\\n    --launcher pytorch ${@:3}\n"
  },
  {
    "path": "tools/misc/download_dataset.py",
    "content": "import argparse\nimport tarfile\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom tarfile import TarFile\nfrom zipfile import ZipFile\n\nimport torch\nfrom mmengine.utils.path import mkdir_or_exist\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Download datasets for training')\n    parser.add_argument(\n        '--dataset-name', type=str, help='dataset name', default='coco2017')\n    parser.add_argument(\n        '--save-dir',\n        type=str,\n        help='the dir to save dataset',\n        default='data/coco')\n    parser.add_argument(\n        '--unzip',\n        action='store_true',\n        help='whether unzip dataset or not, zipped files will be saved')\n    parser.add_argument(\n        '--delete',\n        action='store_true',\n        help='delete the download zipped files')\n    parser.add_argument(\n        '--threads', type=int, help='number of threading', default=4)\n    args = parser.parse_args()\n    return args\n\n\ndef download(url, dir, unzip=True, delete=False, threads=1):\n\n    def download_one(url, dir):\n        f = dir / Path(url).name\n        if Path(url).is_file():\n            Path(url).rename(f)\n        elif not f.exists():\n            print(f'Downloading {url} to {f}')\n            torch.hub.download_url_to_file(url, f, progress=True)\n        if unzip and f.suffix in ('.zip', '.tar'):\n            print(f'Unzipping {f.name}')\n            if f.suffix == '.zip':\n                ZipFile(f).extractall(path=dir)\n            elif f.suffix == '.tar':\n                TarFile(f).extractall(path=dir)\n            if delete:\n                f.unlink()\n                print(f'Delete {f}')\n\n    dir = Path(dir)\n    if threads > 1:\n        pool = ThreadPool(threads)\n        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))\n        pool.close()\n        pool.join()\n    else:\n        for u in [url] if isinstance(url, (str, Path)) else url:\n            download_one(u, dir)\n\n\ndef download_objects365v2(url, dir, unzip=True, delete=False, threads=1):\n\n    def download_single(url, dir):\n\n        if 'train' in url:\n            saving_dir = dir / Path('train_zip')\n            mkdir_or_exist(saving_dir)\n            f = saving_dir / Path(url).name\n\n            unzip_dir = dir / Path('train')\n            mkdir_or_exist(unzip_dir)\n        elif 'val' in url:\n            saving_dir = dir / Path('val')\n            mkdir_or_exist(saving_dir)\n            f = saving_dir / Path(url).name\n\n            unzip_dir = dir / Path('val')\n            mkdir_or_exist(unzip_dir)\n        else:\n            raise NotImplementedError\n\n        if Path(url).is_file():\n            Path(url).rename(f)\n        elif not f.exists():\n            print(f'Downloading {url} to {f}')\n            torch.hub.download_url_to_file(url, f, progress=True)\n\n        if unzip and str(f).endswith('.tar.gz'):\n            print(f'Unzipping {f.name}')\n            tar = tarfile.open(f)\n            tar.extractall(path=unzip_dir)\n            if delete:\n                f.unlink()\n                print(f'Delete {f}')\n\n    # process annotations\n    full_url = []\n    for _url in url:\n        if 'zhiyuan_objv2_train.tar.gz' in _url or \\\n                'zhiyuan_objv2_val.json' in _url:\n            full_url.append(_url)\n        elif 'train' in _url:\n            for i in range(51):\n                full_url.append(f'{_url}patch{i}.tar.gz')\n        elif 
'val/images/v1' in _url:\n            for i in range(16):\n                full_url.append(f'{_url}patch{i}.tar.gz')\n        elif 'val/images/v2' in _url:\n            for i in range(16, 44):\n                full_url.append(f'{_url}patch{i}.tar.gz')\n        else:\n            raise NotImplementedError\n\n    dir = Path(dir)\n    if threads > 1:\n        pool = ThreadPool(threads)\n        pool.imap(lambda x: download_single(*x), zip(full_url, repeat(dir)))\n        pool.close()\n        pool.join()\n    else:\n        for u in full_url:\n            download_single(u, dir)\n\n\ndef main():\n    args = parse_args()\n    path = Path(args.save_dir)\n    if not path.exists():\n        path.mkdir(parents=True, exist_ok=True)\n    data2url = dict(\n        # TODO: Support for downloading Panoptic Segmentation of COCO\n        coco2017=[\n            'http://images.cocodataset.org/zips/train2017.zip',\n            'http://images.cocodataset.org/zips/val2017.zip',\n            'http://images.cocodataset.org/zips/test2017.zip',\n            'http://images.cocodataset.org/zips/unlabeled2017.zip',\n            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',  # noqa\n            'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',  # noqa\n            'http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip',  # noqa\n            'http://images.cocodataset.org/annotations/image_info_test2017.zip',  # noqa\n            'http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip',  # noqa\n        ],\n        lvis=[\n            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa\n            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa\n        ],\n        voc2007=[\n            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',  # noqa\n            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',  # noqa\n            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar',  # noqa\n        ],\n        # Note: There is no download link for Objects365-V1 right now. 
If you\n        # would like to download Objects365-V1, please visit\n        # http://www.objects365.org/ to contact the authors.\n        objects365v2=[\n            # training annotations\n            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/zhiyuan_objv2_train.tar.gz',  # noqa\n            # validation annotations\n            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/zhiyuan_objv2_val.json',  # noqa\n            # training url root\n            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/',  # noqa\n            # validation url root_1\n            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v1/',  # noqa\n            # validation url root_2\n            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v2/'  # noqa\n        ])\n    url = data2url.get(args.dataset_name, None)\n    if url is None:\n        print('Only COCO, VOC, LVIS, and Objects365v2 are supported now!')\n        return\n    if args.dataset_name == 'objects365v2':\n        download_objects365v2(\n            url,\n            dir=path,\n            unzip=args.unzip,\n            delete=args.delete,\n            threads=args.threads)\n    else:\n        download(\n            url,\n            dir=path,\n            unzip=args.unzip,\n            delete=args.delete,\n            threads=args.threads)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/misc/gen_coco_panoptic_test_info.py",
    "content": "import argparse\nimport os.path as osp\n\nfrom mmengine.fileio import dump, load\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Generate COCO test image information '\n        'for COCO panoptic segmentation.')\n    parser.add_argument('data_root', help='Path to COCO annotation directory.')\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n    data_root = args.data_root\n    val_info = load(osp.join(data_root, 'panoptic_val2017.json'))\n    test_old_info = load(osp.join(data_root, 'image_info_test-dev2017.json'))\n\n    # replace categories from image_info_test-dev2017.json\n    # with categories from panoptic_val2017.json which\n    # has attribute `isthing`.\n    test_info = test_old_info\n    test_info.update({'categories': val_info['categories']})\n    dump(test_info, osp.join(data_root,\n                             'panoptic_image_info_test-dev2017.json'))\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/misc/get_crowdhuman_id_hw.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Get image shape on CrowdHuman dataset.\n\nHere is an example to run this script.\n\nExample:\n    python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \\\n    --dataset ${DATASET_TYPE}\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport os.path as osp\nfrom multiprocessing import Pool\n\nimport mmcv\nfrom mmengine.config import Config\nfrom mmengine.fileio import FileClient, dump\nfrom mmengine.logging import print_log\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Collect image metas')\n    parser.add_argument('config', help='Config file path')\n    parser.add_argument(\n        '--dataset',\n        choices=['train', 'val'],\n        help='Collect image metas from which dataset')\n    parser.add_argument(\n        '--nproc',\n        default=10,\n        type=int,\n        help='Processes used for get image metas')\n    args = parser.parse_args()\n    return args\n\n\ndef get_image_metas(anno_str, img_prefix):\n    id_hw = {}\n    file_client = FileClient(backend='disk')\n    anno_dict = json.loads(anno_str)\n    img_path = osp.join(img_prefix, f\"{anno_dict['ID']}.jpg\")\n    img_id = anno_dict['ID']\n    img_bytes = file_client.get(img_path)\n    img = mmcv.imfrombytes(img_bytes, backend='cv2')\n    id_hw[img_id] = img.shape[:2]\n    return id_hw\n\n\ndef main():\n    args = parse_args()\n\n    # get ann_file and img_prefix from config files\n    cfg = Config.fromfile(args.config)\n    file_client_args = cfg.get('file_client_args', dict(backend='disk'))\n    file_client = FileClient(**file_client_args)\n    dataset = args.dataset\n    dataloader_cfg = cfg.get(f'{dataset}_dataloader')\n    ann_file = osp.join(dataloader_cfg.dataset.data_root,\n                        dataloader_cfg.dataset.ann_file)\n    img_prefix = osp.join(dataloader_cfg.dataset.data_root,\n                          dataloader_cfg.dataset.data_prefix['img'])\n\n    # load image metas\n    print_log(\n        f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)\n    anno_strs = file_client.get_text(ann_file).strip().split('\\n')\n    pool = Pool(args.nproc)\n    # get image metas with multiple processes\n    id_hw_temp = pool.starmap(\n        get_image_metas,\n        zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),\n    )\n    pool.close()\n\n    # save image metas\n    id_hw = {}\n    for sub_dict in id_hw_temp:\n        id_hw.update(sub_dict)\n\n    data_root = osp.dirname(ann_file)\n    save_path = osp.join(data_root, f'id_hw_{dataset}.json')\n    print_log(\n        f'\\nsaving \"id_hw_{dataset}.json\" in \"{data_root}\"',\n        level=logging.INFO)\n    dump(id_hw, save_path, file_format='json')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/misc/get_image_metas.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Get image metas on a specific dataset.\n\nHere is an example to run this script.\n\nExample:\n    python tools/misc/get_image_metas.py ${CONFIG} \\\n    --out ${OUTPUT FILE NAME}\n\"\"\"\nimport argparse\nimport csv\nimport os.path as osp\nfrom multiprocessing import Pool\n\nimport mmcv\nfrom mmengine.config import Config\nfrom mmengine.fileio import FileClient, dump\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Collect image metas')\n    parser.add_argument('config', help='Config file path')\n    parser.add_argument(\n        '--dataset',\n        default='val',\n        choices=['train', 'val', 'test'],\n        help='Collect image metas from which dataset')\n    parser.add_argument(\n        '--out',\n        default='validation-image-metas.pkl',\n        help='The output image metas file name. The save dir is in the '\n        'same directory as `dataset.ann_file` path')\n    parser.add_argument(\n        '--nproc',\n        default=4,\n        type=int,\n        help='Processes used for get image metas')\n    args = parser.parse_args()\n    return args\n\n\ndef get_metas_from_csv_style_ann_file(ann_file):\n    data_infos = []\n    cp_filename = None\n    with open(ann_file, 'r') as f:\n        reader = csv.reader(f)\n        for i, line in enumerate(reader):\n            if i == 0:\n                continue\n            img_id = line[0]\n            filename = f'{img_id}.jpg'\n            if filename != cp_filename:\n                data_infos.append(dict(filename=filename))\n                cp_filename = filename\n    return data_infos\n\n\ndef get_metas_from_txt_style_ann_file(ann_file):\n    with open(ann_file) as f:\n        lines = f.readlines()\n    i = 0\n    data_infos = []\n    while i < len(lines):\n        filename = lines[i].rstrip()\n        data_infos.append(dict(filename=filename))\n        skip_lines = int(lines[i + 2]) + 3\n        i += skip_lines\n    return data_infos\n\n\ndef get_image_metas(data_info, img_prefix):\n    file_client = FileClient(backend='disk')\n    filename = data_info.get('filename', None)\n    if filename is not None:\n        if img_prefix is not None:\n            filename = osp.join(img_prefix, filename)\n        img_bytes = file_client.get(filename)\n        img = mmcv.imfrombytes(img_bytes, flag='color')\n        shape = img.shape\n        meta = dict(filename=filename, ori_shape=shape)\n    else:\n        raise NotImplementedError('Missing `filename` in data_info')\n    return meta\n\n\ndef main():\n    args = parse_args()\n    assert args.out.endswith('pkl'), 'The output file name must be pkl suffix'\n\n    # load config files\n    cfg = Config.fromfile(args.config)\n    dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')\n    ann_file = osp.join(dataloader_cfg.dataset.data_root,\n                        dataloader_cfg.dataset.ann_file)\n    img_prefix = osp.join(dataloader_cfg.dataset.data_root,\n                          dataloader_cfg.dataset.data_prefix['img'])\n\n    print(f'{\"-\" * 5} Start Processing {\"-\" * 5}')\n    if ann_file.endswith('csv'):\n        data_infos = get_metas_from_csv_style_ann_file(ann_file)\n    elif ann_file.endswith('txt'):\n        data_infos = get_metas_from_txt_style_ann_file(ann_file)\n    else:\n        shuffix = ann_file.split('.')[-1]\n        raise NotImplementedError('File name must be csv or txt suffix but '\n                                  f'get {shuffix}')\n\n    print(f'Successfully load 
annotation file from {ann_file}')\n    print(f'Processing {len(data_infos)} images...')\n    pool = Pool(args.nproc)\n    # get image metas with multiple processes\n    image_metas = pool.starmap(\n        get_image_metas,\n        zip(data_infos, [img_prefix for _ in range(len(data_infos))]),\n    )\n    pool.close()\n\n    # save image metas\n    root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]\n    save_path = osp.join(root_path, args.out)\n    dump(image_metas, save_path, protocol=4)\n    print(f'Image meta file saved to: {save_path}')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/misc/print_config.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\n\nfrom mmengine import Config, DictAction\n\nfrom mmdet.utils import replace_cfg_vals, update_data_root\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Print the whole config')\n    parser.add_argument('config', help='config file path')\n    parser.add_argument(\n        '--save-path',\n        default=None,\n        help='save path of whole config, suffixed with .py, .json or .yml')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    cfg = Config.fromfile(args.config)\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n    print(f'Config:\\n{cfg.pretty_text}')\n\n    if args.save_path is not None:\n        save_path = args.save_path\n\n        suffix = os.path.splitext(save_path)[-1]\n        assert suffix in ['.py', '.json', '.yml']\n\n        if not os.path.exists(os.path.split(save_path)[0]):\n            os.makedirs(os.path.split(save_path)[0])\n        cfg.dump(save_path)\n        print(f'Config saving at {save_path}')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/misc/split_coco.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\n\nimport numpy as np\nfrom mmengine.fileio import dump, load\nfrom mmengine.utils import mkdir_or_exist, track_parallel_progress\n\nprog_description = '''K-Fold coco split.\n\nTo split coco data for semi-supervised object detection:\n    python tools/misc/split_coco.py\n'''\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '--data-root',\n        type=str,\n        help='The data root of coco dataset.',\n        default='./data/coco/')\n    parser.add_argument(\n        '--out-dir',\n        type=str,\n        help='The output directory of coco semi-supervised annotations.',\n        default='./data/coco/semi_anns/')\n    parser.add_argument(\n        '--labeled-percent',\n        type=float,\n        nargs='+',\n        help='The percentage of labeled data in the training set.',\n        default=[1, 2, 5, 10])\n    parser.add_argument(\n        '--fold',\n        type=int,\n        help='K-fold cross validation for semi-supervised object detection.',\n        default=5)\n    args = parser.parse_args()\n    return args\n\n\ndef split_coco(data_root, out_dir, percent, fold):\n    \"\"\"Split COCO data for Semi-supervised object detection.\n\n    Args:\n        data_root (str): The data root of coco dataset.\n        out_dir (str): The output directory of coco semi-supervised\n            annotations.\n        percent (float): The percentage of labeled data in the training set.\n        fold (int): The fold of dataset and set as random seed for data split.\n    \"\"\"\n\n    def save_anns(name, images, annotations):\n        sub_anns = dict()\n        sub_anns['images'] = images\n        sub_anns['annotations'] = annotations\n        sub_anns['licenses'] = anns['licenses']\n        sub_anns['categories'] = anns['categories']\n        sub_anns['info'] = anns['info']\n\n        mkdir_or_exist(out_dir)\n        dump(sub_anns, f'{out_dir}/{name}.json')\n\n    # set random seed with the fold\n    np.random.seed(fold)\n    ann_file = osp.join(data_root, 'annotations/instances_train2017.json')\n    anns = load(ann_file)\n\n    image_list = anns['images']\n    labeled_total = int(percent / 100. 
* len(image_list))\n    labeled_inds = set(\n        np.random.choice(range(len(image_list)), size=labeled_total))\n    labeled_ids, labeled_images, unlabeled_images = [], [], []\n\n    for i in range(len(image_list)):\n        if i in labeled_inds:\n            labeled_images.append(image_list[i])\n            labeled_ids.append(image_list[i]['id'])\n        else:\n            unlabeled_images.append(image_list[i])\n\n    # get all annotations of labeled images\n    labeled_ids = set(labeled_ids)\n    labeled_annotations, unlabeled_annotations = [], []\n\n    for ann in anns['annotations']:\n        if ann['image_id'] in labeled_ids:\n            labeled_annotations.append(ann)\n        else:\n            unlabeled_annotations.append(ann)\n\n    # save labeled and unlabeled\n    labeled_name = f'instances_train2017.{fold}@{percent}'\n    unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'\n\n    save_anns(labeled_name, labeled_images, labeled_annotations)\n    save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)\n\n\ndef multi_wrapper(args):\n    return split_coco(*args)\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    arguments_list = [(args.data_root, args.out_dir, p, f)\n                      for f in range(1, args.fold + 1)\n                      for p in args.labeled_percent]\n    track_parallel_progress(multi_wrapper, arguments_list, args.fold)\n"
  },
  {
    "path": "tools/model_converters/detectron2_to_mmdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom collections import OrderedDict\n\nimport torch\nfrom mmengine.fileio import load\nfrom mmengine.runner import save_checkpoint\n\n\ndef convert(src: str, dst: str, prefix: str = 'd2_model') -> None:\n    \"\"\"Convert Detectron2 checkpoint to MMDetection style.\n\n    Args:\n        src (str): The Detectron2 checkpoint path, should endswith `pkl`.\n        dst (str): The MMDetection checkpoint path.\n        prefix (str): The prefix of MMDetection model, defaults to 'd2_model'.\n    \"\"\"\n    # load arch_settings\n    assert src.endswith('pkl'), \\\n        'the source Detectron2 checkpoint should endswith `pkl`.'\n    d2_model = load(src, encoding='latin1').get('model')\n    assert d2_model is not None\n\n    # convert to mmdet style\n    dst_state_dict = OrderedDict()\n    for name, value in d2_model.items():\n        if not isinstance(value, torch.Tensor):\n            value = torch.from_numpy(value)\n        dst_state_dict[f'{prefix}.{name}'] = value\n\n    mmdet_model = dict(state_dict=dst_state_dict, meta=dict())\n    save_checkpoint(mmdet_model, dst)\n    print(f'Convert Detectron2 model {src} to MMDetection model {dst}')\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description='Convert Detectron2 checkpoint to MMDetection style')\n    parser.add_argument('src', help='Detectron2 model path')\n    parser.add_argument('dst', help='MMDetectron model save path')\n    parser.add_argument(\n        '--prefix', default='d2_model', type=str, help='prefix of the model')\n    args = parser.parse_args()\n    convert(args.src, args.dst, args.prefix)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/detectron2pytorch.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom collections import OrderedDict\n\nimport torch\nfrom mmengine.fileio import load\n\narch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}\n\n\ndef convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):\n    # detectron replace bn with affine channel layer\n    state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +\n                                                              '_b'])\n    state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +\n                                                                '_s'])\n    bn_size = state_dict[torch_name + '.weight'].size()\n    state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)\n    state_dict[torch_name + '.running_var'] = torch.ones(bn_size)\n    converted_names.add(caffe_name + '_b')\n    converted_names.add(caffe_name + '_s')\n\n\ndef convert_conv_fc(blobs, state_dict, caffe_name, torch_name,\n                    converted_names):\n    state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +\n                                                                '_w'])\n    converted_names.add(caffe_name + '_w')\n    if caffe_name + '_b' in blobs:\n        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +\n                                                                  '_b'])\n        converted_names.add(caffe_name + '_b')\n\n\ndef convert(src, dst, depth):\n    \"\"\"Convert keys in detectron pretrained ResNet models to pytorch style.\"\"\"\n    # load arch_settings\n    if depth not in arch_settings:\n        raise ValueError('Only support ResNet-50 and ResNet-101 currently')\n    block_nums = arch_settings[depth]\n    # load caffe model\n    caffe_model = load(src, encoding='latin1')\n    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model\n    # convert to pytorch style\n    state_dict = OrderedDict()\n    converted_names = set()\n    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)\n    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)\n    for i in range(1, len(block_nums) + 1):\n        for j in range(block_nums[i - 1]):\n            if j == 0:\n                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',\n                                f'layer{i}.{j}.downsample.0', converted_names)\n                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',\n                           f'layer{i}.{j}.downsample.1', converted_names)\n            for k, letter in enumerate(['a', 'b', 'c']):\n                convert_conv_fc(blobs, state_dict,\n                                f'res{i + 1}_{j}_branch2{letter}',\n                                f'layer{i}.{j}.conv{k+1}', converted_names)\n                convert_bn(blobs, state_dict,\n                           f'res{i + 1}_{j}_branch2{letter}_bn',\n                           f'layer{i}.{j}.bn{k + 1}', converted_names)\n    # check if all layers are converted\n    for key in blobs:\n        if key not in converted_names:\n            print(f'Not Convert: {key}')\n    # save checkpoint\n    checkpoint = dict()\n    checkpoint['state_dict'] = state_dict\n    torch.save(checkpoint, dst)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Convert model keys')\n    parser.add_argument('src', help='src detectron model path')\n    parser.add_argument('dst', help='save path')\n    parser.add_argument('depth', type=int, 
help='ResNet model depth')\n    args = parser.parse_args()\n    convert(args.src, args.dst, args.depth)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/publish_model.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport subprocess\n\nimport torch\nfrom mmengine.logging import print_log\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Process a checkpoint to be published')\n    parser.add_argument('in_file', help='input checkpoint filename')\n    parser.add_argument('out_file', help='output checkpoint filename')\n    parser.add_argument(\n        '--save-keys',\n        nargs='+',\n        type=str,\n        default=['meta', 'state_dict'],\n        help='keys to save in the published checkpoint')\n    args = parser.parse_args()\n    return args\n\n\ndef process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']):\n    checkpoint = torch.load(in_file, map_location='cpu')\n\n    # only keep `meta` and `state_dict` for smaller file size\n    ckpt_keys = list(checkpoint.keys())\n    for k in ckpt_keys:\n        if k not in save_keys:\n            print_log(\n                f'Key `{k}` will be removed because it is not in '\n                f'save_keys. If you want to keep it, '\n                f'please set --save-keys.',\n                logger='current')\n            checkpoint.pop(k, None)\n\n    # if it is necessary to remove some sensitive data in checkpoint['meta'],\n    # add the code here.\n    if torch.__version__ >= '1.6':\n        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)\n    else:\n        torch.save(checkpoint, out_file)\n    sha = subprocess.check_output(['sha256sum', out_file]).decode()\n    if out_file.endswith('.pth'):\n        out_file_name = out_file[:-4]\n    else:\n        out_file_name = out_file\n    final_file = out_file_name + f'-{sha[:8]}.pth'\n    subprocess.Popen(['mv', out_file, final_file])\n    print_log(\n        f'The published model is saved at {final_file}.', logger='current')\n\n\ndef main():\n    args = parse_args()\n    process_checkpoint(args.in_file, args.out_file, args.save_keys)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/regnet2mmdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef convert_stem(model_key, model_weight, state_dict, converted_names):\n    new_key = model_key.replace('stem.conv', 'conv1')\n    new_key = new_key.replace('stem.bn', 'bn1')\n    state_dict[new_key] = model_weight\n    converted_names.add(model_key)\n    print(f'Convert {model_key} to {new_key}')\n\n\ndef convert_head(model_key, model_weight, state_dict, converted_names):\n    new_key = model_key.replace('head.fc', 'fc')\n    state_dict[new_key] = model_weight\n    converted_names.add(model_key)\n    print(f'Convert {model_key} to {new_key}')\n\n\ndef convert_reslayer(model_key, model_weight, state_dict, converted_names):\n    split_keys = model_key.split('.')\n    layer, block, module = split_keys[:3]\n    block_id = int(block[1:])\n    layer_name = f'layer{int(layer[1:])}'\n    block_name = f'{block_id - 1}'\n\n    if block_id == 1 and module == 'bn':\n        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'\n    elif block_id == 1 and module == 'proj':\n        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'\n    elif module == 'f':\n        if split_keys[3] == 'a_bn':\n            module_name = 'bn1'\n        elif split_keys[3] == 'b_bn':\n            module_name = 'bn2'\n        elif split_keys[3] == 'c_bn':\n            module_name = 'bn3'\n        elif split_keys[3] == 'a':\n            module_name = 'conv1'\n        elif split_keys[3] == 'b':\n            module_name = 'conv2'\n        elif split_keys[3] == 'c':\n            module_name = 'conv3'\n        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'\n    else:\n        raise ValueError(f'Unsupported conversion of key {model_key}')\n    print(f'Convert {model_key} to {new_key}')\n    state_dict[new_key] = model_weight\n    converted_names.add(model_key)\n\n\ndef convert(src, dst):\n    \"\"\"Convert keys in pycls pretrained RegNet models to mmdet style.\"\"\"\n    # load caffe model\n    regnet_model = torch.load(src)\n    blobs = regnet_model['model_state']\n    # convert to pytorch style\n    state_dict = OrderedDict()\n    converted_names = set()\n    for key, weight in blobs.items():\n        if 'stem' in key:\n            convert_stem(key, weight, state_dict, converted_names)\n        elif 'head' in key:\n            convert_head(key, weight, state_dict, converted_names)\n        elif key.startswith('s'):\n            convert_reslayer(key, weight, state_dict, converted_names)\n\n    # check if all layers are converted\n    for key in blobs:\n        if key not in converted_names:\n            print(f'not converted: {key}')\n    # save checkpoint\n    checkpoint = dict()\n    checkpoint['state_dict'] = state_dict\n    torch.save(checkpoint, dst)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Convert model keys')\n    parser.add_argument('src', help='src detectron model path')\n    parser.add_argument('dst', help='save path')\n    args = parser.parse_args()\n    convert(args.src, args.dst)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/selfsup2mmdet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef moco_convert(src, dst):\n    \"\"\"Convert keys in pycls pretrained moco models to mmdet style.\"\"\"\n    # load caffe model\n    moco_model = torch.load(src)\n    blobs = moco_model['state_dict']\n    # convert to pytorch style\n    state_dict = OrderedDict()\n    for k, v in blobs.items():\n        if not k.startswith('module.encoder_q.'):\n            continue\n        old_k = k\n        k = k.replace('module.encoder_q.', '')\n        state_dict[k] = v\n        print(old_k, '->', k)\n    # save checkpoint\n    checkpoint = dict()\n    checkpoint['state_dict'] = state_dict\n    torch.save(checkpoint, dst)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Convert model keys')\n    parser.add_argument('src', help='src detectron model path')\n    parser.add_argument('dst', help='save path')\n    parser.add_argument(\n        '--selfsup', type=str, choices=['moco', 'swav'], help='save path')\n    args = parser.parse_args()\n    if args.selfsup == 'moco':\n        moco_convert(args.src, args.dst)\n    elif args.selfsup == 'swav':\n        print('SWAV does not need to convert the keys')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/upgrade_model_version.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport re\nimport tempfile\nfrom collections import OrderedDict\n\nimport torch\nfrom mmengine import Config\n\n\ndef is_head(key):\n    valid_head_list = [\n        'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head'\n    ]\n\n    return any(key.startswith(h) for h in valid_head_list)\n\n\ndef parse_config(config_strings):\n    temp_file = tempfile.NamedTemporaryFile()\n    config_path = f'{temp_file.name}.py'\n    with open(config_path, 'w') as f:\n        f.write(config_strings)\n\n    config = Config.fromfile(config_path)\n    is_two_stage = True\n    is_ssd = False\n    is_retina = False\n    reg_cls_agnostic = False\n    if 'rpn_head' not in config.model:\n        is_two_stage = False\n        # check whether it is SSD\n        if config.model.bbox_head.type == 'SSDHead':\n            is_ssd = True\n        elif config.model.bbox_head.type == 'RetinaHead':\n            is_retina = True\n    elif isinstance(config.model['bbox_head'], list):\n        reg_cls_agnostic = True\n    elif 'reg_class_agnostic' in config.model.bbox_head:\n        reg_cls_agnostic = config.model.bbox_head \\\n            .reg_class_agnostic\n    temp_file.close()\n    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic\n\n\ndef reorder_cls_channel(val, num_classes=81):\n    # bias\n    if val.dim() == 1:\n        new_val = torch.cat((val[1:], val[:1]), dim=0)\n    # weight\n    else:\n        out_channels, in_channels = val.shape[:2]\n        # conv_cls for softmax output\n        if out_channels != num_classes and out_channels % num_classes == 0:\n            new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:])\n            new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1)\n            new_val = new_val.reshape(val.size())\n        # fc_cls\n        elif out_channels == num_classes:\n            new_val = torch.cat((val[1:], val[:1]), dim=0)\n        # agnostic | retina_cls | rpn_cls\n        else:\n            new_val = val\n\n    return new_val\n\n\ndef truncate_cls_channel(val, num_classes=81):\n\n    # bias\n    if val.dim() == 1:\n        if val.size(0) % num_classes == 0:\n            new_val = val[:num_classes - 1]\n        else:\n            new_val = val\n    # weight\n    else:\n        out_channels, in_channels = val.shape[:2]\n        # conv_logits\n        if out_channels % num_classes == 0:\n            new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]\n            new_val = new_val.reshape(-1, *val.shape[1:])\n        # agnostic\n        else:\n            new_val = val\n\n    return new_val\n\n\ndef truncate_reg_channel(val, num_classes=81):\n    # bias\n    if val.dim() == 1:\n        # fc_reg | rpn_reg\n        if val.size(0) % num_classes == 0:\n            new_val = val.reshape(num_classes, -1)[:num_classes - 1]\n            new_val = new_val.reshape(-1)\n        # agnostic\n        else:\n            new_val = val\n    # weight\n    else:\n        out_channels, in_channels = val.shape[:2]\n        # fc_reg | rpn_reg\n        if out_channels % num_classes == 0:\n            new_val = val.reshape(num_classes, -1, in_channels,\n                                  *val.shape[2:])[1:]\n            new_val = new_val.reshape(-1, *val.shape[1:])\n        # agnostic\n        else:\n            new_val = val\n\n    return new_val\n\n\ndef convert(in_file, out_file, num_classes):\n    \"\"\"Convert keys in checkpoints.\n\n    There can be some breaking 
changes during the development of mmdetection,\n    and this tool is used for upgrading checkpoints trained with old versions\n    to the latest one.\n    \"\"\"\n    checkpoint = torch.load(in_file)\n    in_state_dict = checkpoint.pop('state_dict')\n    out_state_dict = OrderedDict()\n    meta_info = checkpoint['meta']\n    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(\n        '#' + meta_info['config'])\n    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:\n        upgrade_retina = True\n    else:\n        upgrade_retina = False\n\n    # MMDetection v2.5.0 unifies the class order in RPN\n    # if the model is trained in version<v2.5.0\n    # The RPN model should be upgraded to be used in version>=2.5.0\n    if meta_info['mmdet_version'] < '2.5.0':\n        upgrade_rpn = True\n    else:\n        upgrade_rpn = False\n\n    for key, val in in_state_dict.items():\n        new_key = key\n        new_val = val\n        if is_two_stage and is_head(key):\n            new_key = 'roi_head.{}'.format(key)\n\n        # classification\n        if upgrade_rpn:\n            m = re.search(\n                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'\n                r'fovea_cls).(weight|bias)', new_key)\n        else:\n            m = re.search(\n                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'\n                r'fovea_cls).(weight|bias)', new_key)\n        if m is not None:\n            print(f'reorder cls channels of {new_key}')\n            new_val = reorder_cls_channel(val, num_classes)\n\n        # regression\n        if upgrade_rpn:\n            m = re.search(r'(fc_reg).(weight|bias)', new_key)\n        else:\n            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)\n        if m is not None and not reg_cls_agnostic:\n            print(f'truncate regression channels of {new_key}')\n            new_val = truncate_reg_channel(val, num_classes)\n\n        # mask head\n        m = re.search(r'(conv_logits).(weight|bias)', new_key)\n        if m is not None:\n            print(f'truncate mask prediction channels of {new_key}')\n            new_val = truncate_cls_channel(val, num_classes)\n\n        m = re.search(r'(cls_convs|reg_convs).\\d.(weight|bias)', key)\n        # Legacy issues in RetinaNet since V1.x\n        # Use ConvModule instead of nn.Conv2d in RetinaNet\n        # cls_convs.0.weight -> cls_convs.0.conv.weight\n        if m is not None and upgrade_retina:\n            param = m.groups()[1]\n            new_key = key.replace(param, f'conv.{param}')\n            out_state_dict[new_key] = val\n            print(f'rename the name of {key} to {new_key}')\n            continue\n\n        m = re.search(r'(cls_convs).\\d.(weight|bias)', key)\n        if m is not None and is_ssd:\n            print(f'reorder cls channels of {new_key}')\n            new_val = reorder_cls_channel(val, num_classes)\n\n        out_state_dict[new_key] = new_val\n    checkpoint['state_dict'] = out_state_dict\n    torch.save(checkpoint, out_file)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Upgrade model version')\n    parser.add_argument('in_file', help='input checkpoint file')\n    parser.add_argument('out_file', help='output checkpoint file')\n    parser.add_argument(\n        '--num-classes',\n        type=int,\n        default=81,\n        help='number of classes of the original model')\n    args = parser.parse_args()\n    convert(args.in_file, args.out_file, args.num_classes)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/model_converters/upgrade_ssd_version.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport tempfile\nfrom collections import OrderedDict\n\nimport torch\nfrom mmengine import Config\n\n\ndef parse_config(config_strings):\n    temp_file = tempfile.NamedTemporaryFile()\n    config_path = f'{temp_file.name}.py'\n    with open(config_path, 'w') as f:\n        f.write(config_strings)\n\n    config = Config.fromfile(config_path)\n    # check whether it is SSD\n    if config.model.bbox_head.type != 'SSDHead':\n        raise AssertionError('This is not a SSD model.')\n\n\ndef convert(in_file, out_file):\n    checkpoint = torch.load(in_file)\n    in_state_dict = checkpoint.pop('state_dict')\n    out_state_dict = OrderedDict()\n    meta_info = checkpoint['meta']\n    parse_config('#' + meta_info['config'])\n    for key, value in in_state_dict.items():\n        if 'extra' in key:\n            layer_idx = int(key.split('.')[2])\n            new_key = 'neck.extra_layers.{}.{}.conv.'.format(\n                layer_idx // 2, layer_idx % 2) + key.split('.')[-1]\n        elif 'l2_norm' in key:\n            new_key = 'neck.l2_norm.weight'\n        elif 'bbox_head' in key:\n            new_key = key[:21] + '.0' + key[21:]\n        else:\n            new_key = key\n        out_state_dict[new_key] = value\n    checkpoint['state_dict'] = out_state_dict\n\n    if torch.__version__ >= '1.6':\n        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)\n    else:\n        torch.save(checkpoint, out_file)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Upgrade SSD version')\n    parser.add_argument('in_file', help='input checkpoint file')\n    parser.add_argument('out_file', help='output checkpoint file')\n\n    args = parser.parse_args()\n    convert(args.in_file, args.out_file)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/slurm_test.sh",
    "content": "#!/usr/bin/env bash\n\nset -x\n\nPARTITION=$1\nJOB_NAME=$2\nCONFIG=$3\nCHECKPOINT=$4\nGPUS=${GPUS:-8}\nGPUS_PER_NODE=${GPUS_PER_NODE:-8}\nCPUS_PER_TASK=${CPUS_PER_TASK:-5}\nPY_ARGS=${@:5}\nSRUN_ARGS=${SRUN_ARGS:-\"\"}\n\nPYTHONPATH=\"$(dirname $0)/..\":$PYTHONPATH \\\nsrun -p ${PARTITION} \\\n    --job-name=${JOB_NAME} \\\n    --gres=gpu:${GPUS_PER_NODE} \\\n    --ntasks=${GPUS} \\\n    --ntasks-per-node=${GPUS_PER_NODE} \\\n    --cpus-per-task=${CPUS_PER_TASK} \\\n    --kill-on-bad-exit=1 \\\n    ${SRUN_ARGS} \\\n    python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher=\"slurm\" ${PY_ARGS}\n"
  },
  {
    "path": "tools/slurm_train.sh",
    "content": "#!/usr/bin/env bash\n\nset -x\n\nPARTITION=$1\nJOB_NAME=$2\nCONFIG=$3\nWORK_DIR=$4\nGPUS=${GPUS:-8}\nGPUS_PER_NODE=${GPUS_PER_NODE:-8}\nCPUS_PER_TASK=${CPUS_PER_TASK:-5}\nSRUN_ARGS=${SRUN_ARGS:-\"\"}\nPY_ARGS=${@:5}\n\nPYTHONPATH=\"$(dirname $0)/..\":$PYTHONPATH \\\nsrun -p ${PARTITION} \\\n    --job-name=${JOB_NAME} \\\n    --gres=gpu:${GPUS_PER_NODE} \\\n    --ntasks=${GPUS} \\\n    --ntasks-per-node=${GPUS_PER_NODE} \\\n    --cpus-per-task=${CPUS_PER_TASK} \\\n    --kill-on-bad-exit=1 \\\n    ${SRUN_ARGS} \\\n    python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher=\"slurm\" ${PY_ARGS}\n"
  },
  {
    "path": "tools/test.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\nimport warnings\nfrom copy import deepcopy\n\nfrom mmengine import ConfigDict\nfrom mmengine.config import Config, DictAction\nfrom mmengine.runner import Runner\n\nfrom mmdet.engine.hooks.utils import trigger_visualization_hook\nfrom mmdet.evaluation import DumpDetResults\nfrom mmdet.registry import RUNNERS\n\n\n# TODO: support fuse_conv_bn and format_only\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='MMDet test (and eval) a model')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument('checkpoint', help='checkpoint file')\n    parser.add_argument(\n        '--work-dir',\n        help='the directory to save the file containing evaluation metrics')\n    parser.add_argument(\n        '--out',\n        type=str,\n        help='dump predictions to a pickle file for offline evaluation')\n    parser.add_argument(\n        '--show', action='store_true', help='show prediction results')\n    parser.add_argument(\n        '--show-dir',\n        help='directory where painted images will be saved. '\n        'If specified, it will be automatically saved '\n        'to the work_dir/timestamp/show_dir')\n    parser.add_argument(\n        '--wait-time', type=float, default=2, help='the interval of show (s)')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--tta', action='store_true')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # load config\n    cfg = Config.fromfile(args.config)\n    cfg.launcher = args.launcher\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # work_dir is determined in this priority: CLI > segment in file > filename\n    if args.work_dir is not None:\n        # update configs according to CLI args if args.work_dir is not None\n        cfg.work_dir = args.work_dir\n    elif cfg.get('work_dir', None) is None:\n        # use config filename as default work_dir if cfg.work_dir is None\n        cfg.work_dir = osp.join('./work_dirs',\n                                osp.splitext(osp.basename(args.config))[0])\n\n    cfg.load_from = args.checkpoint\n\n    if args.show or args.show_dir:\n        cfg = trigger_visualization_hook(cfg, args)\n\n    if args.tta:\n\n        if 'tta_model' not in cfg:\n            warnings.warn('Cannot find ``tta_model`` in config, '\n                          'we will set it as default.')\n            cfg.tta_model = dict(\n                type='DetTTAModel',\n                tta_cfg=dict(\n                    nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))\n        if 'tta_pipeline' not in cfg:\n            warnings.warn('Cannot find ``tta_pipeline`` in config, '\n                          'we will set it as default.')\n            test_data_cfg = cfg.test_dataloader.dataset\n            while 'dataset' in test_data_cfg:\n                test_data_cfg = test_data_cfg['dataset']\n            cfg.tta_pipeline = deepcopy(test_data_cfg.pipeline)\n            flip_tta = dict(\n                type='TestTimeAug',\n                transforms=[\n                    [\n                        dict(type='RandomFlip', prob=1.),\n                        dict(type='RandomFlip', prob=0.)\n                    ],\n                    [\n                        dict(\n                            type='PackDetInputs',\n                            meta_keys=('img_id', 'img_path', 'ori_shape',\n                                       'img_shape', 'scale_factor', 'flip',\n                                       'flip_direction'))\n                    ],\n                ])\n            cfg.tta_pipeline[-1] = flip_tta\n        cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model)\n        cfg.test_dataloader.dataset.pipeline = cfg.tta_pipeline\n\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    # add `DumpResults` dummy metric\n    if args.out is not None:\n        assert args.out.endswith(('.pkl', '.pickle')), \\\n            'The dump file must be a pkl file.'\n        runner.test_evaluator.metrics.append(\n            DumpDetResults(out_file_path=args.out))\n\n    # start testing\n    
runner.test()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tools/train.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport logging\nimport os\nimport os.path as osp\n\nfrom mmengine.config import Config, DictAction\nfrom mmengine.logging import print_log\nfrom mmengine.registry import RUNNERS\nfrom mmengine.runner import Runner\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Train a detector')\n    parser.add_argument('config', help='train config file path')\n    parser.add_argument('--work-dir', help='the dir to save logs and models')\n    parser.add_argument(\n        '--amp',\n        action='store_true',\n        default=False,\n        help='enable automatic-mixed-precision training')\n    parser.add_argument(\n        '--auto-scale-lr',\n        action='store_true',\n        help='enable automatically scaling LR.')\n    parser.add_argument(\n        '--resume',\n        nargs='?',\n        type=str,\n        const='auto',\n        help='If specify checkpoint path, resume from it, while if not '\n        'specify, try to auto resume from the latest checkpoint '\n        'in the work directory.')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # load config\n    cfg = Config.fromfile(args.config)\n    cfg.launcher = args.launcher\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    # work_dir is determined in this priority: CLI > segment in file > filename\n    if args.work_dir is not None:\n        # update configs according to CLI args if args.work_dir is not None\n        cfg.work_dir = args.work_dir\n    elif cfg.get('work_dir', None) is None:\n        # use config filename as default work_dir if cfg.work_dir is None\n        cfg.work_dir = osp.join('./work_dirs',\n                                osp.splitext(osp.basename(args.config))[0])\n\n    # enable automatic-mixed-precision training\n    if args.amp is True:\n        optim_wrapper = cfg.optim_wrapper.type\n        if optim_wrapper == 'AmpOptimWrapper':\n            print_log(\n                'AMP training is already enabled in your config.',\n                logger='current',\n                level=logging.WARNING)\n        else:\n            assert optim_wrapper == 'OptimWrapper', (\n                '`--amp` is only supported when the optimizer wrapper type is '\n                f'`OptimWrapper` but got {optim_wrapper}.')\n            cfg.optim_wrapper.type = 'AmpOptimWrapper'\n            cfg.optim_wrapper.loss_scale = 'dynamic'\n\n    # enable automatically scaling LR\n    if args.auto_scale_lr:\n        if 'auto_scale_lr' in cfg and \\\n                'enable' in cfg.auto_scale_lr and \\\n                
'base_batch_size' in cfg.auto_scale_lr:\n            cfg.auto_scale_lr.enable = True\n        else:\n            raise RuntimeError('Can not find \"auto_scale_lr\" or '\n                               '\"auto_scale_lr.enable\" or '\n                               '\"auto_scale_lr.base_batch_size\" in your'\n                               ' configuration file.')\n\n    # resume is determined in this priority: resume from > auto_resume\n    if args.resume == 'auto':\n        cfg.resume = True\n        cfg.load_from = None\n    elif args.resume is not None:\n        cfg.resume = True\n        cfg.load_from = args.resume\n\n    # build the runner from config\n    if 'runner_type' not in cfg:\n        # build the default runner\n        runner = Runner.from_cfg(cfg)\n    else:\n        # build customized runner from the registry\n        # if 'runner_type' is set in the cfg\n        runner = RUNNERS.build(cfg)\n\n    # start training\n    runner.train()\n\n\nif __name__ == '__main__':\n    main()\n"
  }
]